   1/*
   2 *  Kernel Probes (KProbes)
   3 *  kernel/kprobes.c
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18 *
  19 * Copyright (C) IBM Corporation, 2002, 2004
  20 *
  21 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  22 *		Probes initial implementation (includes suggestions from
  23 *		Rusty Russell).
  24 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  25 *		hlists and exceptions notifier as suggested by Andi Kleen.
  26 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  27 *		interface to access function arguments.
  28 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  29 *		exceptions notifier to be first on the priority list.
  30 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  31 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  32 *		<prasanna@in.ibm.com> added function-return probes.
  33 */
  34#include <linux/kprobes.h>
  35#include <linux/hash.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/stddef.h>
  39#include <linux/export.h>
  40#include <linux/moduleloader.h>
  41#include <linux/kallsyms.h>
  42#include <linux/freezer.h>
  43#include <linux/seq_file.h>
  44#include <linux/debugfs.h>
  45#include <linux/sysctl.h>
  46#include <linux/kdebug.h>
  47#include <linux/memory.h>
  48#include <linux/ftrace.h>
  49#include <linux/cpu.h>
  50#include <linux/jump_label.h>
  51
  52#include <asm/sections.h>
  53#include <asm/cacheflush.h>
  54#include <asm/errno.h>
  55#include <linux/uaccess.h>
  56
  57#define KPROBE_HASH_BITS 6
  58#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  59
  60
  61static int kprobes_initialized;
  62static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  63static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  64
  65/* NOTE: change this value only with kprobe_mutex held */
  66static bool kprobes_all_disarmed;
  67
  68/* This protects kprobe_table and optimizing_list */
  69static DEFINE_MUTEX(kprobe_mutex);
  70static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  71static struct {
  72	raw_spinlock_t lock ____cacheline_aligned_in_smp;
  73} kretprobe_table_locks[KPROBE_TABLE_SIZE];
  74
  75kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
  76					unsigned int __unused)
  77{
  78	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
  79}
  80
  81static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  82{
  83	return &(kretprobe_table_locks[hash].lock);
  84}
  85
  86/* Blacklist -- list of struct kprobe_blacklist_entry */
  87static LIST_HEAD(kprobe_blacklist);
  88
  89#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  90/*
  91 * kprobe->ainsn.insn points to the copy of the instruction to be
  92 * single-stepped. x86_64, POWER4 and above have no-exec support and
  93 * stepping on the instruction on a vmalloced/kmalloced/data page
  94 * is a recipe for disaster
  95 */
  96struct kprobe_insn_page {
  97	struct list_head list;
  98	kprobe_opcode_t *insns;		/* Page of instruction slots */
  99	struct kprobe_insn_cache *cache;
 100	int nused;
 101	int ngarbage;
 102	char slot_used[];
 103};
 104
 105#define KPROBE_INSN_PAGE_SIZE(slots)			\
 106	(offsetof(struct kprobe_insn_page, slot_used) +	\
 107	 (sizeof(char) * (slots)))
 108
 109static int slots_per_page(struct kprobe_insn_cache *c)
 110{
 111	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
 112}
 113
 114enum kprobe_slot_state {
 115	SLOT_CLEAN = 0,
 116	SLOT_DIRTY = 1,
 117	SLOT_USED = 2,
 118};
 119
 120void __weak *alloc_insn_page(void)
 121{
 122	return module_alloc(PAGE_SIZE);
 123}
 124
 125void __weak free_insn_page(void *page)
 126{
 127	module_memfree(page);
 128}
 129
 130struct kprobe_insn_cache kprobe_insn_slots = {
 131	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
 132	.alloc = alloc_insn_page,
 133	.free = free_insn_page,
 134	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 135	.insn_size = MAX_INSN_SIZE,
 136	.nr_garbage = 0,
 137};
 138static int collect_garbage_slots(struct kprobe_insn_cache *c);
 139
 140/**
 141 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 142 * We allocate an executable page if there's no room on existing ones.
 143 */
 144kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 145{
 146	struct kprobe_insn_page *kip;
 147	kprobe_opcode_t *slot = NULL;
 148
 149	/* Since the slot array is not protected by rcu, we need a mutex */
 150	mutex_lock(&c->mutex);
 151 retry:
 152	rcu_read_lock();
 153	list_for_each_entry_rcu(kip, &c->pages, list) {
 154		if (kip->nused < slots_per_page(c)) {
 155			int i;
 156			for (i = 0; i < slots_per_page(c); i++) {
 157				if (kip->slot_used[i] == SLOT_CLEAN) {
 158					kip->slot_used[i] = SLOT_USED;
 159					kip->nused++;
 160					slot = kip->insns + (i * c->insn_size);
 161					rcu_read_unlock();
 162					goto out;
 163				}
 164			}
 165			/* kip->nused is broken. Fix it. */
 166			kip->nused = slots_per_page(c);
 167			WARN_ON(1);
 168		}
 169	}
 170	rcu_read_unlock();
 171
 172	/* If there are any garbage slots, collect it and try again. */
 173	if (c->nr_garbage && collect_garbage_slots(c) == 0)
 174		goto retry;
 175
 176	/* All out of space.  Need to allocate a new page. */
 177	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 178	if (!kip)
 179		goto out;
 180
 181	/*
 182	 * Use module_alloc so this page is within +/- 2GB of where the
 183	 * kernel image and loaded module images reside. This is required
 184	 * so x86_64 can correctly handle the %rip-relative fixups.
 185	 */
 186	kip->insns = c->alloc();
 187	if (!kip->insns) {
 188		kfree(kip);
 189		goto out;
 190	}
 191	INIT_LIST_HEAD(&kip->list);
 192	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 193	kip->slot_used[0] = SLOT_USED;
 194	kip->nused = 1;
 195	kip->ngarbage = 0;
 196	kip->cache = c;
 197	list_add_rcu(&kip->list, &c->pages);
 198	slot = kip->insns;
 199out:
 200	mutex_unlock(&c->mutex);
 201	return slot;
 202}
 203
 204/* Return 1 if all garbages are collected, otherwise 0. */
 205static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
 206{
 207	kip->slot_used[idx] = SLOT_CLEAN;
 208	kip->nused--;
 209	if (kip->nused == 0) {
 210		/*
 211		 * Page is no longer in use.  Free it unless
 212		 * it's the last one.  We keep the last one
 213		 * so as not to have to set it up again the
 214		 * next time somebody inserts a probe.
 215		 */
 216		if (!list_is_singular(&kip->list)) {
 217			list_del_rcu(&kip->list);
 218			synchronize_rcu();
 219			kip->cache->free(kip->insns);
 220			kfree(kip);
 221		}
 222		return 1;
 223	}
 224	return 0;
 225}
 226
 227static int collect_garbage_slots(struct kprobe_insn_cache *c)
 228{
 229	struct kprobe_insn_page *kip, *next;
 230
 231	/* Ensure no-one is interrupted on the garbages */
 232	synchronize_sched();
 233
 234	list_for_each_entry_safe(kip, next, &c->pages, list) {
 235		int i;
 236		if (kip->ngarbage == 0)
 237			continue;
 238		kip->ngarbage = 0;	/* we will collect all garbages */
 239		for (i = 0; i < slots_per_page(c); i++) {
 240			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
 241				break;
 242		}
 243	}
 244	c->nr_garbage = 0;
 245	return 0;
 246}
 247
 248void __free_insn_slot(struct kprobe_insn_cache *c,
 249		      kprobe_opcode_t *slot, int dirty)
 250{
 251	struct kprobe_insn_page *kip;
 252	long idx;
 253
 254	mutex_lock(&c->mutex);
 255	rcu_read_lock();
 256	list_for_each_entry_rcu(kip, &c->pages, list) {
 257		idx = ((long)slot - (long)kip->insns) /
 258			(c->insn_size * sizeof(kprobe_opcode_t));
 259		if (idx >= 0 && idx < slots_per_page(c))
 260			goto out;
 261	}
 262	/* Could not find this slot. */
 263	WARN_ON(1);
 264	kip = NULL;
 265out:
 266	rcu_read_unlock();
 267	/* Mark and sweep: this may sleep */
 268	if (kip) {
 269		/* Check double free */
 270		WARN_ON(kip->slot_used[idx] != SLOT_USED);
 271		if (dirty) {
 272			kip->slot_used[idx] = SLOT_DIRTY;
 273			kip->ngarbage++;
 274			if (++c->nr_garbage > slots_per_page(c))
 275				collect_garbage_slots(c);
 276		} else {
 277			collect_one_slot(kip, idx);
 278		}
 279	}
 280	mutex_unlock(&c->mutex);
 281}
 282
 283/*
 284 * Check given address is on the page of kprobe instruction slots.
 285 * This will be used for checking whether the address on a stack
 286 * is on a text area or not.
 287 */
 288bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
 289{
 290	struct kprobe_insn_page *kip;
 291	bool ret = false;
 292
 293	rcu_read_lock();
 294	list_for_each_entry_rcu(kip, &c->pages, list) {
 295		if (addr >= (unsigned long)kip->insns &&
 296		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
 297			ret = true;
 298			break;
 299		}
 300	}
 301	rcu_read_unlock();
 302
 303	return ret;
 304}
 305
 306#ifdef CONFIG_OPTPROBES
 307/* For optimized_kprobe buffer */
 308struct kprobe_insn_cache kprobe_optinsn_slots = {
 309	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
 310	.alloc = alloc_insn_page,
 311	.free = free_insn_page,
 312	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 313	/* .insn_size is initialized later */
 314	.nr_garbage = 0,
 315};
 316#endif
 317#endif
 318
  319/* We have preemption disabled, so it is safe to use __ versions */
 320static inline void set_kprobe_instance(struct kprobe *kp)
 321{
 322	__this_cpu_write(kprobe_instance, kp);
 323}
 324
 325static inline void reset_kprobe_instance(void)
 326{
 327	__this_cpu_write(kprobe_instance, NULL);
 328}
 329
 330/*
 331 * This routine is called either:
 332 * 	- under the kprobe_mutex - during kprobe_[un]register()
 333 * 				OR
 334 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 335 */
 336struct kprobe *get_kprobe(void *addr)
 337{
 338	struct hlist_head *head;
 339	struct kprobe *p;
 340
 341	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 342	hlist_for_each_entry_rcu(p, head, hlist) {
 343		if (p->addr == addr)
 344			return p;
 345	}
 346
 347	return NULL;
 348}
 349NOKPROBE_SYMBOL(get_kprobe);
 350
 351static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 352
 353/* Return true if the kprobe is an aggregator */
 354static inline int kprobe_aggrprobe(struct kprobe *p)
 355{
 356	return p->pre_handler == aggr_pre_handler;
 357}
 358
 359/* Return true(!0) if the kprobe is unused */
 360static inline int kprobe_unused(struct kprobe *p)
 361{
 362	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
 363	       list_empty(&p->list);
 364}
 365
 366/*
 367 * Keep all fields in the kprobe consistent
 368 */
 369static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
 370{
 371	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
 372	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
 373}
 374
 375#ifdef CONFIG_OPTPROBES
 376/* NOTE: change this value only with kprobe_mutex held */
 377static bool kprobes_allow_optimization;
 378
 379/*
 380 * Call all pre_handler on the list, but ignores its return value.
 381 * This must be called from arch-dep optimized caller.
 382 */
 383void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 384{
 385	struct kprobe *kp;
 386
 387	list_for_each_entry_rcu(kp, &p->list, list) {
 388		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 389			set_kprobe_instance(kp);
 390			kp->pre_handler(kp, regs);
 391		}
 392		reset_kprobe_instance();
 393	}
 394}
 395NOKPROBE_SYMBOL(opt_pre_handler);
 396
 397/* Free optimized instructions and optimized_kprobe */
 398static void free_aggr_kprobe(struct kprobe *p)
 399{
 400	struct optimized_kprobe *op;
 401
 402	op = container_of(p, struct optimized_kprobe, kp);
 403	arch_remove_optimized_kprobe(op);
 404	arch_remove_kprobe(p);
 405	kfree(op);
 406}
 407
 408/* Return true(!0) if the kprobe is ready for optimization. */
 409static inline int kprobe_optready(struct kprobe *p)
 410{
 411	struct optimized_kprobe *op;
 412
 413	if (kprobe_aggrprobe(p)) {
 414		op = container_of(p, struct optimized_kprobe, kp);
 415		return arch_prepared_optinsn(&op->optinsn);
 416	}
 417
 418	return 0;
 419}
 420
 421/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
 422static inline int kprobe_disarmed(struct kprobe *p)
 423{
 424	struct optimized_kprobe *op;
 425
 426	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
 427	if (!kprobe_aggrprobe(p))
 428		return kprobe_disabled(p);
 429
 430	op = container_of(p, struct optimized_kprobe, kp);
 431
 432	return kprobe_disabled(p) && list_empty(&op->list);
 433}
 434
 435/* Return true(!0) if the probe is queued on (un)optimizing lists */
 436static int kprobe_queued(struct kprobe *p)
 437{
 438	struct optimized_kprobe *op;
 439
 440	if (kprobe_aggrprobe(p)) {
 441		op = container_of(p, struct optimized_kprobe, kp);
 442		if (!list_empty(&op->list))
 443			return 1;
 444	}
 445	return 0;
 446}
 447
 448/*
 449 * Return an optimized kprobe whose optimizing code replaces
 450 * instructions including addr (exclude breakpoint).
 451 */
 452static struct kprobe *get_optimized_kprobe(unsigned long addr)
 453{
 454	int i;
 455	struct kprobe *p = NULL;
 456	struct optimized_kprobe *op;
 457
 458	/* Don't check i == 0, since that is a breakpoint case. */
 459	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
 460		p = get_kprobe((void *)(addr - i));
 461
 462	if (p && kprobe_optready(p)) {
 463		op = container_of(p, struct optimized_kprobe, kp);
 464		if (arch_within_optimized_kprobe(op, addr))
 465			return p;
 466	}
 467
 468	return NULL;
 469}
 470
 471/* Optimization staging list, protected by kprobe_mutex */
 472static LIST_HEAD(optimizing_list);
 473static LIST_HEAD(unoptimizing_list);
 474static LIST_HEAD(freeing_list);
 475
 476static void kprobe_optimizer(struct work_struct *work);
 477static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 478#define OPTIMIZE_DELAY 5
 479
 480/*
 481 * Optimize (replace a breakpoint with a jump) kprobes listed on
 482 * optimizing_list.
 483 */
 484static void do_optimize_kprobes(void)
 485{
 486	/*
 487	 * The optimization/unoptimization refers online_cpus via
 488	 * stop_machine() and cpu-hotplug modifies online_cpus.
 489	 * And same time, text_mutex will be held in cpu-hotplug and here.
 490	 * This combination can cause a deadlock (cpu-hotplug try to lock
 491	 * text_mutex but stop_machine can not be done because online_cpus
 492	 * has been changed)
 493	 * To avoid this deadlock, caller must have locked cpu hotplug
 494	 * for preventing cpu-hotplug outside of text_mutex locking.
 495	 */
 496	lockdep_assert_cpus_held();
 497
  498	/* Optimization is never done when disarmed */
 499	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
 500	    list_empty(&optimizing_list))
 501		return;
 502
 503	mutex_lock(&text_mutex);
 504	arch_optimize_kprobes(&optimizing_list);
 505	mutex_unlock(&text_mutex);
 506}
 507
 508/*
 509 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 510 * if need) kprobes listed on unoptimizing_list.
 511 */
 512static void do_unoptimize_kprobes(void)
 513{
 514	struct optimized_kprobe *op, *tmp;
 515
 516	/* See comment in do_optimize_kprobes() */
 517	lockdep_assert_cpus_held();
 518
 519	/* Unoptimization must be done anytime */
 520	if (list_empty(&unoptimizing_list))
 521		return;
 522
 523	mutex_lock(&text_mutex);
 524	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 525	/* Loop free_list for disarming */
 526	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 527		/* Disarm probes if marked disabled */
 528		if (kprobe_disabled(&op->kp))
 529			arch_disarm_kprobe(&op->kp);
 530		if (kprobe_unused(&op->kp)) {
 531			/*
 532			 * Remove unused probes from hash list. After waiting
 533			 * for synchronization, these probes are reclaimed.
 534			 * (reclaiming is done by do_free_cleaned_kprobes.)
 535			 */
 536			hlist_del_rcu(&op->kp.hlist);
 537		} else
 538			list_del_init(&op->list);
 539	}
 540	mutex_unlock(&text_mutex);
 541}
 542
 543/* Reclaim all kprobes on the free_list */
 544static void do_free_cleaned_kprobes(void)
 545{
 546	struct optimized_kprobe *op, *tmp;
 547
 548	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 549		BUG_ON(!kprobe_unused(&op->kp));
 550		list_del_init(&op->list);
 551		free_aggr_kprobe(&op->kp);
 552	}
 553}
 554
 555/* Start optimizer after OPTIMIZE_DELAY passed */
 556static void kick_kprobe_optimizer(void)
 557{
 558	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 559}
 560
 561/* Kprobe jump optimizer */
 562static void kprobe_optimizer(struct work_struct *work)
 563{
 564	mutex_lock(&kprobe_mutex);
 565	cpus_read_lock();
 566	/* Lock modules while optimizing kprobes */
 567	mutex_lock(&module_mutex);
 568
 569	/*
 570	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
  571	 * kprobes before waiting for quiescence period.
 572	 */
 573	do_unoptimize_kprobes();
 574
 575	/*
  576	 * Step 2: Wait for quiescence period to ensure all potentially
 577	 * preempted tasks to have normally scheduled. Because optprobe
 578	 * may modify multiple instructions, there is a chance that Nth
 579	 * instruction is preempted. In that case, such tasks can return
 580	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
 581	 * Note that on non-preemptive kernel, this is transparently converted
  582	 * to synchronize_sched() to wait for all interrupts to have completed.
 583	 */
 584	synchronize_rcu_tasks();
 585
  586	/* Step 3: Optimize kprobes after quiescence period */
 587	do_optimize_kprobes();
 588
  589	/* Step 4: Free cleaned kprobes after quiescence period */
 590	do_free_cleaned_kprobes();
 591
 592	mutex_unlock(&module_mutex);
 593	cpus_read_unlock();
 594	mutex_unlock(&kprobe_mutex);
 595
 596	/* Step 5: Kick optimizer again if needed */
 597	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 598		kick_kprobe_optimizer();
 599}
 600
 601/* Wait for completing optimization and unoptimization */
 602void wait_for_kprobe_optimizer(void)
 603{
 604	mutex_lock(&kprobe_mutex);
 605
 606	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 607		mutex_unlock(&kprobe_mutex);
 608
  609	/* this will also make optimizing_work execute immediately */
 610		flush_delayed_work(&optimizing_work);
 611		/* @optimizing_work might not have been queued yet, relax */
 612		cpu_relax();
 613
 614		mutex_lock(&kprobe_mutex);
 615	}
 616
 617	mutex_unlock(&kprobe_mutex);
 618}
 619
 620/* Optimize kprobe if p is ready to be optimized */
 621static void optimize_kprobe(struct kprobe *p)
 622{
 623	struct optimized_kprobe *op;
 624
 625	/* Check if the kprobe is disabled or not ready for optimization. */
 626	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 627	    (kprobe_disabled(p) || kprobes_all_disarmed))
 628		return;
 629
  630	/* Neither break_handler nor post_handler is supported. */
 631	if (p->break_handler || p->post_handler)
 632		return;
 633
 634	op = container_of(p, struct optimized_kprobe, kp);
 635
 636	/* Check there is no other kprobes at the optimized instructions */
 637	if (arch_check_optimized_kprobe(op) < 0)
 638		return;
 639
 640	/* Check if it is already optimized. */
 641	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
 642		return;
 643	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 644
 645	if (!list_empty(&op->list))
 646		/* This is under unoptimizing. Just dequeue the probe */
 647		list_del_init(&op->list);
 648	else {
 649		list_add(&op->list, &optimizing_list);
 650		kick_kprobe_optimizer();
 651	}
 652}
 653
 654/* Short cut to direct unoptimizing */
 655static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 656{
 657	lockdep_assert_cpus_held();
 658	arch_unoptimize_kprobe(op);
 659	if (kprobe_disabled(&op->kp))
 660		arch_disarm_kprobe(&op->kp);
 661}
 662
 663/* Unoptimize a kprobe if p is optimized */
 664static void unoptimize_kprobe(struct kprobe *p, bool force)
 665{
 666	struct optimized_kprobe *op;
 667
 668	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
 669		return; /* This is not an optprobe nor optimized */
 670
 671	op = container_of(p, struct optimized_kprobe, kp);
 672	if (!kprobe_optimized(p)) {
 673		/* Unoptimized or unoptimizing case */
 674		if (force && !list_empty(&op->list)) {
 675			/*
 676			 * Only if this is unoptimizing kprobe and forced,
 677			 * forcibly unoptimize it. (No need to unoptimize
 678			 * unoptimized kprobe again :)
 679			 */
 680			list_del_init(&op->list);
 681			force_unoptimize_kprobe(op);
 682		}
 683		return;
 684	}
 685
 686	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 687	if (!list_empty(&op->list)) {
 688		/* Dequeue from the optimization queue */
 689		list_del_init(&op->list);
 690		return;
 691	}
 692	/* Optimized kprobe case */
 693	if (force)
 694		/* Forcibly update the code: this is a special case */
 695		force_unoptimize_kprobe(op);
 696	else {
 697		list_add(&op->list, &unoptimizing_list);
 698		kick_kprobe_optimizer();
 699	}
 700}
 701
 702/* Cancel unoptimizing for reusing */
 703static void reuse_unused_kprobe(struct kprobe *ap)
 704{
 705	struct optimized_kprobe *op;
 706
 707	BUG_ON(!kprobe_unused(ap));
 708	/*
 709	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
 710	 * there is still a relative jump) and disabled.
 711	 */
 712	op = container_of(ap, struct optimized_kprobe, kp);
 713	if (unlikely(list_empty(&op->list)))
 714		printk(KERN_WARNING "Warning: found a stray unused "
 715			"aggrprobe@%p\n", ap->addr);
 716	/* Enable the probe again */
 717	ap->flags &= ~KPROBE_FLAG_DISABLED;
 718	/* Optimize it again (remove from op->list) */
 719	BUG_ON(!kprobe_optready(ap));
 720	optimize_kprobe(ap);
 721}
 722
 723/* Remove optimized instructions */
 724static void kill_optimized_kprobe(struct kprobe *p)
 725{
 726	struct optimized_kprobe *op;
 727
 728	op = container_of(p, struct optimized_kprobe, kp);
 729	if (!list_empty(&op->list))
 730		/* Dequeue from the (un)optimization queue */
 731		list_del_init(&op->list);
 732	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 733
 734	if (kprobe_unused(p)) {
 735		/* Enqueue if it is unused */
 736		list_add(&op->list, &freeing_list);
 737		/*
 738		 * Remove unused probes from the hash list. After waiting
 739		 * for synchronization, this probe is reclaimed.
 740		 * (reclaiming is done by do_free_cleaned_kprobes().)
 741		 */
 742		hlist_del_rcu(&op->kp.hlist);
 743	}
 744
 745	/* Don't touch the code, because it is already freed. */
 746	arch_remove_optimized_kprobe(op);
 747}
 748
 749static inline
 750void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 751{
 752	if (!kprobe_ftrace(p))
 753		arch_prepare_optimized_kprobe(op, p);
 754}
 755
 756/* Try to prepare optimized instructions */
 757static void prepare_optimized_kprobe(struct kprobe *p)
 758{
 759	struct optimized_kprobe *op;
 760
 761	op = container_of(p, struct optimized_kprobe, kp);
 762	__prepare_optimized_kprobe(op, p);
 763}
 764
 765/* Allocate new optimized_kprobe and try to prepare optimized instructions */
 766static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 767{
 768	struct optimized_kprobe *op;
 769
 770	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
 771	if (!op)
 772		return NULL;
 773
 774	INIT_LIST_HEAD(&op->list);
 775	op->kp.addr = p->addr;
 776	__prepare_optimized_kprobe(op, p);
 777
 778	return &op->kp;
 779}
 780
 781static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
 782
 783/*
 784 * Prepare an optimized_kprobe and optimize it
 785 * NOTE: p must be a normal registered kprobe
 786 */
 787static void try_to_optimize_kprobe(struct kprobe *p)
 788{
 789	struct kprobe *ap;
 790	struct optimized_kprobe *op;
 791
 792	/* Impossible to optimize ftrace-based kprobe */
 793	if (kprobe_ftrace(p))
 794		return;
 795
 796	/* For preparing optimization, jump_label_text_reserved() is called */
 797	cpus_read_lock();
 798	jump_label_lock();
 799	mutex_lock(&text_mutex);
 800
 801	ap = alloc_aggr_kprobe(p);
 802	if (!ap)
 803		goto out;
 804
 805	op = container_of(ap, struct optimized_kprobe, kp);
 806	if (!arch_prepared_optinsn(&op->optinsn)) {
 807		/* If failed to setup optimizing, fallback to kprobe */
 808		arch_remove_optimized_kprobe(op);
 809		kfree(op);
 810		goto out;
 811	}
 812
 813	init_aggr_kprobe(ap, p);
 814	optimize_kprobe(ap);	/* This just kicks optimizer thread */
 815
 816out:
 817	mutex_unlock(&text_mutex);
 818	jump_label_unlock();
 819	cpus_read_unlock();
 820}
 821
 822#ifdef CONFIG_SYSCTL
 823static void optimize_all_kprobes(void)
 824{
 825	struct hlist_head *head;
 826	struct kprobe *p;
 827	unsigned int i;
 828
 829	mutex_lock(&kprobe_mutex);
 830	/* If optimization is already allowed, just return */
 831	if (kprobes_allow_optimization)
 832		goto out;
 833
 834	cpus_read_lock();
 835	kprobes_allow_optimization = true;
 836	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 837		head = &kprobe_table[i];
 838		hlist_for_each_entry_rcu(p, head, hlist)
 839			if (!kprobe_disabled(p))
 840				optimize_kprobe(p);
 841	}
 842	cpus_read_unlock();
 843	printk(KERN_INFO "Kprobes globally optimized\n");
 844out:
 845	mutex_unlock(&kprobe_mutex);
 846}
 847
 848static void unoptimize_all_kprobes(void)
 849{
 850	struct hlist_head *head;
 851	struct kprobe *p;
 852	unsigned int i;
 853
 854	mutex_lock(&kprobe_mutex);
 855	/* If optimization is already prohibited, just return */
 856	if (!kprobes_allow_optimization) {
 857		mutex_unlock(&kprobe_mutex);
 858		return;
 859	}
 860
 861	cpus_read_lock();
 862	kprobes_allow_optimization = false;
 863	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 864		head = &kprobe_table[i];
 865		hlist_for_each_entry_rcu(p, head, hlist) {
 866			if (!kprobe_disabled(p))
 867				unoptimize_kprobe(p, false);
 868		}
 869	}
 870	cpus_read_unlock();
 871	mutex_unlock(&kprobe_mutex);
 872
 873	/* Wait for unoptimizing completion */
 874	wait_for_kprobe_optimizer();
 875	printk(KERN_INFO "Kprobes globally unoptimized\n");
 876}
 877
 878static DEFINE_MUTEX(kprobe_sysctl_mutex);
 879int sysctl_kprobes_optimization;
 880int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 881				      void __user *buffer, size_t *length,
 882				      loff_t *ppos)
 883{
 884	int ret;
 885
 886	mutex_lock(&kprobe_sysctl_mutex);
 887	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 888	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 889
 890	if (sysctl_kprobes_optimization)
 891		optimize_all_kprobes();
 892	else
 893		unoptimize_all_kprobes();
 894	mutex_unlock(&kprobe_sysctl_mutex);
 895
 896	return ret;
 897}
 898#endif /* CONFIG_SYSCTL */
 899
 900/* Put a breakpoint for a probe. Must be called with text_mutex locked */
 901static void __arm_kprobe(struct kprobe *p)
 902{
 903	struct kprobe *_p;
 904
 905	/* Check collision with other optimized kprobes */
 906	_p = get_optimized_kprobe((unsigned long)p->addr);
 907	if (unlikely(_p))
 908		/* Fallback to unoptimized kprobe */
 909		unoptimize_kprobe(_p, true);
 910
 911	arch_arm_kprobe(p);
 912	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
 913}
 914
 915/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
 916static void __disarm_kprobe(struct kprobe *p, bool reopt)
 917{
 918	struct kprobe *_p;
 919
 920	/* Try to unoptimize */
 921	unoptimize_kprobe(p, kprobes_all_disarmed);
 922
 923	if (!kprobe_queued(p)) {
 924		arch_disarm_kprobe(p);
 925		/* If another kprobe was blocked, optimize it. */
 926		_p = get_optimized_kprobe((unsigned long)p->addr);
 927		if (unlikely(_p) && reopt)
 928			optimize_kprobe(_p);
 929	}
 930	/* TODO: reoptimize others after unoptimized this probe */
 931}
 932
 933#else /* !CONFIG_OPTPROBES */
 934
 935#define optimize_kprobe(p)			do {} while (0)
 936#define unoptimize_kprobe(p, f)			do {} while (0)
 937#define kill_optimized_kprobe(p)		do {} while (0)
 938#define prepare_optimized_kprobe(p)		do {} while (0)
 939#define try_to_optimize_kprobe(p)		do {} while (0)
 940#define __arm_kprobe(p)				arch_arm_kprobe(p)
 941#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
 942#define kprobe_disarmed(p)			kprobe_disabled(p)
 943#define wait_for_kprobe_optimizer()		do {} while (0)
 944
  945/* There should be no unused kprobes that can be reused without optimization */
 946static void reuse_unused_kprobe(struct kprobe *ap)
 947{
 948	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
 949	BUG_ON(kprobe_unused(ap));
 950}
 951
 952static void free_aggr_kprobe(struct kprobe *p)
 953{
 954	arch_remove_kprobe(p);
 955	kfree(p);
 956}
 957
 958static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 959{
 960	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 961}
 962#endif /* CONFIG_OPTPROBES */
 963
 964#ifdef CONFIG_KPROBES_ON_FTRACE
 965static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 966	.func = kprobe_ftrace_handler,
 967	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 968};
 969static int kprobe_ftrace_enabled;
 970
 971/* Must ensure p->addr is really on ftrace */
 972static int prepare_kprobe(struct kprobe *p)
 973{
 974	if (!kprobe_ftrace(p))
 975		return arch_prepare_kprobe(p);
 976
 977	return arch_prepare_kprobe_ftrace(p);
 978}
 979
 980/* Caller must lock kprobe_mutex */
 981static int arm_kprobe_ftrace(struct kprobe *p)
 982{
 983	int ret = 0;
 984
 985	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 986				   (unsigned long)p->addr, 0, 0);
 987	if (ret) {
 988		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 989		return ret;
 990	}
 991
 992	if (kprobe_ftrace_enabled == 0) {
 993		ret = register_ftrace_function(&kprobe_ftrace_ops);
 994		if (ret) {
 995			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
 996			goto err_ftrace;
 997		}
 998	}
 999
1000	kprobe_ftrace_enabled++;
1001	return ret;
1002
1003err_ftrace:
1004	/*
1005	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
1006	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
1007	 * empty filter_hash which would undesirably trace all functions.
1008	 */
1009	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
1010	return ret;
1011}
1012
1013/* Caller must lock kprobe_mutex */
1014static int disarm_kprobe_ftrace(struct kprobe *p)
1015{
1016	int ret = 0;
1017
1018	if (kprobe_ftrace_enabled == 1) {
1019		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1020		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1021			return ret;
1022	}
1023
1024	kprobe_ftrace_enabled--;
1025
1026	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
1027			   (unsigned long)p->addr, 1, 0);
1028	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
1029	return ret;
1030}
1031#else	/* !CONFIG_KPROBES_ON_FTRACE */
1032#define prepare_kprobe(p)	arch_prepare_kprobe(p)
1033#define arm_kprobe_ftrace(p)	(-ENODEV)
1034#define disarm_kprobe_ftrace(p)	(-ENODEV)
1035#endif
1036
1037/* Arm a kprobe with text_mutex */
1038static int arm_kprobe(struct kprobe *kp)
1039{
1040	if (unlikely(kprobe_ftrace(kp)))
1041		return arm_kprobe_ftrace(kp);
1042
1043	cpus_read_lock();
1044	mutex_lock(&text_mutex);
1045	__arm_kprobe(kp);
1046	mutex_unlock(&text_mutex);
1047	cpus_read_unlock();
1048
1049	return 0;
1050}
1051
1052/* Disarm a kprobe with text_mutex */
1053static int disarm_kprobe(struct kprobe *kp, bool reopt)
1054{
1055	if (unlikely(kprobe_ftrace(kp)))
1056		return disarm_kprobe_ftrace(kp);
1057
1058	cpus_read_lock();
1059	mutex_lock(&text_mutex);
1060	__disarm_kprobe(kp, reopt);
1061	mutex_unlock(&text_mutex);
1062	cpus_read_unlock();
1063
1064	return 0;
1065}
1066
1067/*
1068 * Aggregate handlers for multiple kprobes support - these handlers
1069 * take care of invoking the individual kprobe handlers on p->list
1070 */
1071static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1072{
1073	struct kprobe *kp;
1074
1075	list_for_each_entry_rcu(kp, &p->list, list) {
1076		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1077			set_kprobe_instance(kp);
1078			if (kp->pre_handler(kp, regs))
1079				return 1;
1080		}
1081		reset_kprobe_instance();
1082	}
1083	return 0;
1084}
1085NOKPROBE_SYMBOL(aggr_pre_handler);
1086
1087static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1088			      unsigned long flags)
1089{
1090	struct kprobe *kp;
1091
1092	list_for_each_entry_rcu(kp, &p->list, list) {
1093		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1094			set_kprobe_instance(kp);
1095			kp->post_handler(kp, regs, flags);
1096			reset_kprobe_instance();
1097		}
1098	}
1099}
1100NOKPROBE_SYMBOL(aggr_post_handler);
1101
1102static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1103			      int trapnr)
1104{
1105	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1106
1107	/*
1108	 * if we faulted "during" the execution of a user specified
1109	 * probe handler, invoke just that probe's fault handler
1110	 */
1111	if (cur && cur->fault_handler) {
1112		if (cur->fault_handler(cur, regs, trapnr))
1113			return 1;
1114	}
1115	return 0;
1116}
1117NOKPROBE_SYMBOL(aggr_fault_handler);
1118
1119static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1120{
1121	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1122	int ret = 0;
1123
1124	if (cur && cur->break_handler) {
1125		if (cur->break_handler(cur, regs))
1126			ret = 1;
1127	}
1128	reset_kprobe_instance();
1129	return ret;
1130}
1131NOKPROBE_SYMBOL(aggr_break_handler);
1132
1133/* Walks the list and increments nmissed count for multiprobe case */
1134void kprobes_inc_nmissed_count(struct kprobe *p)
1135{
1136	struct kprobe *kp;
1137	if (!kprobe_aggrprobe(p)) {
1138		p->nmissed++;
1139	} else {
1140		list_for_each_entry_rcu(kp, &p->list, list)
1141			kp->nmissed++;
1142	}
1143	return;
1144}
1145NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1146
1147void recycle_rp_inst(struct kretprobe_instance *ri,
1148		     struct hlist_head *head)
1149{
1150	struct kretprobe *rp = ri->rp;
1151
1152	/* remove rp inst off the rprobe_inst_table */
1153	hlist_del(&ri->hlist);
1154	INIT_HLIST_NODE(&ri->hlist);
1155	if (likely(rp)) {
1156		raw_spin_lock(&rp->lock);
1157		hlist_add_head(&ri->hlist, &rp->free_instances);
1158		raw_spin_unlock(&rp->lock);
1159	} else
1160		/* Unregistering */
1161		hlist_add_head(&ri->hlist, head);
1162}
1163NOKPROBE_SYMBOL(recycle_rp_inst);
1164
1165void kretprobe_hash_lock(struct task_struct *tsk,
1166			 struct hlist_head **head, unsigned long *flags)
1167__acquires(hlist_lock)
1168{
1169	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1170	raw_spinlock_t *hlist_lock;
1171
1172	*head = &kretprobe_inst_table[hash];
1173	hlist_lock = kretprobe_table_lock_ptr(hash);
1174	raw_spin_lock_irqsave(hlist_lock, *flags);
1175}
1176NOKPROBE_SYMBOL(kretprobe_hash_lock);
1177
1178static void kretprobe_table_lock(unsigned long hash,
1179				 unsigned long *flags)
1180__acquires(hlist_lock)
1181{
1182	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1183	raw_spin_lock_irqsave(hlist_lock, *flags);
1184}
1185NOKPROBE_SYMBOL(kretprobe_table_lock);
1186
1187void kretprobe_hash_unlock(struct task_struct *tsk,
1188			   unsigned long *flags)
1189__releases(hlist_lock)
1190{
1191	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1192	raw_spinlock_t *hlist_lock;
1193
1194	hlist_lock = kretprobe_table_lock_ptr(hash);
1195	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1196}
1197NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1198
1199static void kretprobe_table_unlock(unsigned long hash,
1200				   unsigned long *flags)
1201__releases(hlist_lock)
1202{
1203	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1204	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1205}
1206NOKPROBE_SYMBOL(kretprobe_table_unlock);
1207
1208/*
1209 * This function is called from finish_task_switch when task tk becomes dead,
1210 * so that we can recycle any function-return probe instances associated
1211 * with this task. These left over instances represent probed functions
1212 * that have been called but will never return.
1213 */
1214void kprobe_flush_task(struct task_struct *tk)
1215{
1216	struct kretprobe_instance *ri;
1217	struct hlist_head *head, empty_rp;
1218	struct hlist_node *tmp;
1219	unsigned long hash, flags = 0;
1220
1221	if (unlikely(!kprobes_initialized))
1222		/* Early boot.  kretprobe_table_locks not yet initialized. */
1223		return;
1224
1225	INIT_HLIST_HEAD(&empty_rp);
1226	hash = hash_ptr(tk, KPROBE_HASH_BITS);
1227	head = &kretprobe_inst_table[hash];
1228	kretprobe_table_lock(hash, &flags);
1229	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1230		if (ri->task == tk)
1231			recycle_rp_inst(ri, &empty_rp);
1232	}
1233	kretprobe_table_unlock(hash, &flags);
1234	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1235		hlist_del(&ri->hlist);
1236		kfree(ri);
1237	}
1238}
1239NOKPROBE_SYMBOL(kprobe_flush_task);
1240
1241static inline void free_rp_inst(struct kretprobe *rp)
1242{
1243	struct kretprobe_instance *ri;
1244	struct hlist_node *next;
1245
1246	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1247		hlist_del(&ri->hlist);
1248		kfree(ri);
1249	}
1250}
1251
1252static void cleanup_rp_inst(struct kretprobe *rp)
1253{
1254	unsigned long flags, hash;
1255	struct kretprobe_instance *ri;
1256	struct hlist_node *next;
1257	struct hlist_head *head;
1258
1259	/* No race here */
1260	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1261		kretprobe_table_lock(hash, &flags);
1262		head = &kretprobe_inst_table[hash];
1263		hlist_for_each_entry_safe(ri, next, head, hlist) {
1264			if (ri->rp == rp)
1265				ri->rp = NULL;
1266		}
1267		kretprobe_table_unlock(hash, &flags);
1268	}
1269	free_rp_inst(rp);
1270}
1271NOKPROBE_SYMBOL(cleanup_rp_inst);
1272
1273/*
1274* Add the new probe to ap->list. Fail if this is the
1275* second jprobe at the address - two jprobes can't coexist
1276*/
1277static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1278{
1279	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1280
1281	if (p->break_handler || p->post_handler)
1282		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1283
1284	if (p->break_handler) {
1285		if (ap->break_handler)
1286			return -EEXIST;
1287		list_add_tail_rcu(&p->list, &ap->list);
1288		ap->break_handler = aggr_break_handler;
1289	} else
1290		list_add_rcu(&p->list, &ap->list);
1291	if (p->post_handler && !ap->post_handler)
1292		ap->post_handler = aggr_post_handler;
1293
1294	return 0;
1295}
1296
1297/*
1298 * Fill in the required fields of the "manager kprobe". Replace the
1299 * earlier kprobe in the hlist with the manager kprobe
1300 */
1301static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1302{
1303	/* Copy p's insn slot to ap */
1304	copy_kprobe(p, ap);
1305	flush_insn_slot(ap);
1306	ap->addr = p->addr;
1307	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1308	ap->pre_handler = aggr_pre_handler;
1309	ap->fault_handler = aggr_fault_handler;
 1310	/* We don't care about the kprobe which has gone. */
1311	if (p->post_handler && !kprobe_gone(p))
1312		ap->post_handler = aggr_post_handler;
1313	if (p->break_handler && !kprobe_gone(p))
1314		ap->break_handler = aggr_break_handler;
1315
1316	INIT_LIST_HEAD(&ap->list);
1317	INIT_HLIST_NODE(&ap->hlist);
1318
1319	list_add_rcu(&p->list, &ap->list);
1320	hlist_replace_rcu(&p->hlist, &ap->hlist);
1321}
1322
1323/*
1324 * This is the second or subsequent kprobe at the address - handle
1325 * the intricacies
1326 */
1327static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1328{
1329	int ret = 0;
1330	struct kprobe *ap = orig_p;
1331
1332	cpus_read_lock();
1333
1334	/* For preparing optimization, jump_label_text_reserved() is called */
1335	jump_label_lock();
1336	mutex_lock(&text_mutex);
1337
1338	if (!kprobe_aggrprobe(orig_p)) {
1339		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1340		ap = alloc_aggr_kprobe(orig_p);
1341		if (!ap) {
1342			ret = -ENOMEM;
1343			goto out;
1344		}
1345		init_aggr_kprobe(ap, orig_p);
1346	} else if (kprobe_unused(ap))
1347		/* This probe is going to die. Rescue it */
1348		reuse_unused_kprobe(ap);
1349
1350	if (kprobe_gone(ap)) {
1351		/*
1352		 * Attempting to insert new probe at the same location that
1353		 * had a probe in the module vaddr area which already
1354		 * freed. So, the instruction slot has already been
1355		 * released. We need a new slot for the new probe.
1356		 */
1357		ret = arch_prepare_kprobe(ap);
1358		if (ret)
1359			/*
1360			 * Even if fail to allocate new slot, don't need to
1361			 * free aggr_probe. It will be used next time, or
1362			 * freed by unregister_kprobe.
1363			 */
1364			goto out;
1365
1366		/* Prepare optimized instructions if possible. */
1367		prepare_optimized_kprobe(ap);
1368
1369		/*
1370		 * Clear gone flag to prevent allocating new slot again, and
1371		 * set disabled flag because it is not armed yet.
1372		 */
1373		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1374			    | KPROBE_FLAG_DISABLED;
1375	}
1376
1377	/* Copy ap's insn slot to p */
1378	copy_kprobe(ap, p);
1379	ret = add_new_kprobe(ap, p);
1380
1381out:
1382	mutex_unlock(&text_mutex);
1383	jump_label_unlock();
1384	cpus_read_unlock();
1385
1386	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1387		ap->flags &= ~KPROBE_FLAG_DISABLED;
1388		if (!kprobes_all_disarmed) {
1389			/* Arm the breakpoint again. */
1390			ret = arm_kprobe(ap);
1391			if (ret) {
1392				ap->flags |= KPROBE_FLAG_DISABLED;
1393				list_del_rcu(&p->list);
1394				synchronize_sched();
1395			}
1396		}
1397	}
1398	return ret;
1399}
1400
1401bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1402{
1403	/* The __kprobes marked functions and entry code must not be probed */
1404	return addr >= (unsigned long)__kprobes_text_start &&
1405	       addr < (unsigned long)__kprobes_text_end;
1406}
1407
1408bool within_kprobe_blacklist(unsigned long addr)
1409{
1410	struct kprobe_blacklist_entry *ent;
1411
1412	if (arch_within_kprobe_blacklist(addr))
1413		return true;
1414	/*
1415	 * If there exists a kprobe_blacklist, verify and
1416	 * fail any probe registration in the prohibited area
1417	 */
1418	list_for_each_entry(ent, &kprobe_blacklist, list) {
1419		if (addr >= ent->start_addr && addr < ent->end_addr)
1420			return true;
1421	}
1422
1423	return false;
1424}
1425
1426/*
1427 * If we have a symbol_name argument, look it up and add the offset field
1428 * to it. This way, we can specify a relative address to a symbol.
1429 * This returns encoded errors if it fails to look up symbol or invalid
1430 * combination of parameters.
1431 */
1432static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1433			const char *symbol_name, unsigned int offset)
1434{
1435	if ((symbol_name && addr) || (!symbol_name && !addr))
1436		goto invalid;
1437
1438	if (symbol_name) {
1439		addr = kprobe_lookup_name(symbol_name, offset);
1440		if (!addr)
1441			return ERR_PTR(-ENOENT);
1442	}
1443
1444	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1445	if (addr)
1446		return addr;
1447
1448invalid:
1449	return ERR_PTR(-EINVAL);
1450}
1451
1452static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1453{
1454	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1455}
1456
1457/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1458static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1459{
1460	struct kprobe *ap, *list_p;
1461
1462	ap = get_kprobe(p->addr);
1463	if (unlikely(!ap))
1464		return NULL;
1465
1466	if (p != ap) {
1467		list_for_each_entry_rcu(list_p, &ap->list, list)
1468			if (list_p == p)
1469			/* kprobe p is a valid probe */
1470				goto valid;
1471		return NULL;
1472	}
1473valid:
1474	return ap;
1475}
1476
1477/* Return error if the kprobe is being re-registered */
1478static inline int check_kprobe_rereg(struct kprobe *p)
1479{
1480	int ret = 0;
1481
1482	mutex_lock(&kprobe_mutex);
1483	if (__get_valid_kprobe(p))
1484		ret = -EINVAL;
1485	mutex_unlock(&kprobe_mutex);
1486
1487	return ret;
1488}
1489
1490int __weak arch_check_ftrace_location(struct kprobe *p)
1491{
1492	unsigned long ftrace_addr;
1493
1494	ftrace_addr = ftrace_location((unsigned long)p->addr);
1495	if (ftrace_addr) {
1496#ifdef CONFIG_KPROBES_ON_FTRACE
1497		/* Given address is not on the instruction boundary */
1498		if ((unsigned long)p->addr != ftrace_addr)
1499			return -EILSEQ;
1500		p->flags |= KPROBE_FLAG_FTRACE;
1501#else	/* !CONFIG_KPROBES_ON_FTRACE */
1502		return -EINVAL;
1503#endif
1504	}
1505	return 0;
1506}
1507
1508static int check_kprobe_address_safe(struct kprobe *p,
1509				     struct module **probed_mod)
1510{
1511	int ret;
1512
1513	ret = arch_check_ftrace_location(p);
1514	if (ret)
1515		return ret;
1516	jump_label_lock();
1517	preempt_disable();
1518
1519	/* Ensure it is not in reserved area nor out of text */
1520	if (!kernel_text_address((unsigned long) p->addr) ||
1521	    within_kprobe_blacklist((unsigned long) p->addr) ||
1522	    jump_label_text_reserved(p->addr, p->addr)) {
1523		ret = -EINVAL;
1524		goto out;
1525	}
1526
 1527	/* Check if we are probing a module */
1528	*probed_mod = __module_text_address((unsigned long) p->addr);
1529	if (*probed_mod) {
1530		/*
1531		 * We must hold a refcount of the probed module while updating
1532		 * its code to prohibit unexpected unloading.
1533		 */
1534		if (unlikely(!try_module_get(*probed_mod))) {
1535			ret = -ENOENT;
1536			goto out;
1537		}
1538
1539		/*
1540		 * If the module freed .init.text, we couldn't insert
1541		 * kprobes in there.
1542		 */
1543		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1544		    (*probed_mod)->state != MODULE_STATE_COMING) {
1545			module_put(*probed_mod);
1546			*probed_mod = NULL;
1547			ret = -ENOENT;
1548		}
1549	}
1550out:
1551	preempt_enable();
1552	jump_label_unlock();
1553
1554	return ret;
1555}
1556
1557int register_kprobe(struct kprobe *p)
1558{
1559	int ret;
1560	struct kprobe *old_p;
1561	struct module *probed_mod;
1562	kprobe_opcode_t *addr;
1563
1564	/* Adjust probe address from symbol */
1565	addr = kprobe_addr(p);
1566	if (IS_ERR(addr))
1567		return PTR_ERR(addr);
1568	p->addr = addr;
1569
1570	ret = check_kprobe_rereg(p);
1571	if (ret)
1572		return ret;
1573
1574	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1575	p->flags &= KPROBE_FLAG_DISABLED;
1576	p->nmissed = 0;
1577	INIT_LIST_HEAD(&p->list);
1578
1579	ret = check_kprobe_address_safe(p, &probed_mod);
1580	if (ret)
1581		return ret;
1582
1583	mutex_lock(&kprobe_mutex);
1584
1585	old_p = get_kprobe(p->addr);
1586	if (old_p) {
1587		/* Since this may unoptimize old_p, locking text_mutex. */
1588		ret = register_aggr_kprobe(old_p, p);
1589		goto out;
1590	}
1591
1592	cpus_read_lock();
1593	/* Prevent text modification */
1594	mutex_lock(&text_mutex);
1595	ret = prepare_kprobe(p);
1596	mutex_unlock(&text_mutex);
1597	cpus_read_unlock();
1598	if (ret)
1599		goto out;
1600
1601	INIT_HLIST_NODE(&p->hlist);
1602	hlist_add_head_rcu(&p->hlist,
1603		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1604
1605	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1606		ret = arm_kprobe(p);
1607		if (ret) {
1608			hlist_del_rcu(&p->hlist);
1609			synchronize_sched();
1610			goto out;
1611		}
1612	}
1613
1614	/* Try to optimize kprobe */
1615	try_to_optimize_kprobe(p);
1616out:
1617	mutex_unlock(&kprobe_mutex);
1618
1619	if (probed_mod)
1620		module_put(probed_mod);
1621
1622	return ret;
1623}
1624EXPORT_SYMBOL_GPL(register_kprobe);
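/*
 * Editor's note: a minimal, illustrative sketch of how a module might use
 * the register_kprobe()/unregister_kprobe() API exported above. It is not
 * part of kernel/kprobes.c and is kept under #if 0, like the jprobe stubs
 * later in this file. The probed symbol and handler body are assumptions
 * for the example only; built as a standalone module it would also need
 * <linux/module.h> and <linux/kprobes.h>.
 */
#if 0
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs just before the probed instruction is executed. */
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* 0: continue with the original instruction */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",	/* assumed example target */
	.pre_handler	= example_pre_handler,
};

static int __init example_kprobe_init(void)
{
	return register_kprobe(&example_kp);	/* arms the breakpoint */
}

static void __exit example_kprobe_exit(void)
{
	unregister_kprobe(&example_kp);
}
#endif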
1625
1626/* Check if all probes on the aggrprobe are disabled */
1627static int aggr_kprobe_disabled(struct kprobe *ap)
1628{
1629	struct kprobe *kp;
1630
1631	list_for_each_entry_rcu(kp, &ap->list, list)
1632		if (!kprobe_disabled(kp))
1633			/*
1634			 * There is an active probe on the list.
1635			 * We can't disable this ap.
1636			 */
1637			return 0;
1638
1639	return 1;
1640}
1641
1642/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
1643static struct kprobe *__disable_kprobe(struct kprobe *p)
1644{
1645	struct kprobe *orig_p;
1646	int ret;
1647
1648	/* Get an original kprobe for return */
1649	orig_p = __get_valid_kprobe(p);
1650	if (unlikely(orig_p == NULL))
1651		return ERR_PTR(-EINVAL);
1652
1653	if (!kprobe_disabled(p)) {
1654		/* Disable probe if it is a child probe */
1655		if (p != orig_p)
1656			p->flags |= KPROBE_FLAG_DISABLED;
1657
1658		/* Try to disarm and disable this/parent probe */
1659		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1660			/*
1661			 * If kprobes_all_disarmed is set, orig_p
1662			 * should have already been disarmed, so
 1663			 * skip the unneeded disarming process.
1664			 */
1665			if (!kprobes_all_disarmed) {
1666				ret = disarm_kprobe(orig_p, true);
1667				if (ret) {
1668					p->flags &= ~KPROBE_FLAG_DISABLED;
1669					return ERR_PTR(ret);
1670				}
1671			}
1672			orig_p->flags |= KPROBE_FLAG_DISABLED;
1673		}
1674	}
1675
1676	return orig_p;
1677}
1678
1679/*
1680 * Unregister a kprobe without a scheduler synchronization.
1681 */
1682static int __unregister_kprobe_top(struct kprobe *p)
1683{
1684	struct kprobe *ap, *list_p;
1685
1686	/* Disable kprobe. This will disarm it if needed. */
1687	ap = __disable_kprobe(p);
1688	if (IS_ERR(ap))
1689		return PTR_ERR(ap);
1690
1691	if (ap == p)
1692		/*
1693		 * This probe is an independent(and non-optimized) kprobe
1694		 * (not an aggrprobe). Remove from the hash list.
1695		 */
1696		goto disarmed;
1697
1698	/* Following process expects this probe is an aggrprobe */
1699	WARN_ON(!kprobe_aggrprobe(ap));
1700
1701	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1702		/*
 1703		 * !disarmed could happen if the probe is under delayed
1704		 * unoptimizing.
1705		 */
1706		goto disarmed;
1707	else {
1708		/* If disabling probe has special handlers, update aggrprobe */
1709		if (p->break_handler && !kprobe_gone(p))
1710			ap->break_handler = NULL;
1711		if (p->post_handler && !kprobe_gone(p)) {
1712			list_for_each_entry_rcu(list_p, &ap->list, list) {
1713				if ((list_p != p) && (list_p->post_handler))
1714					goto noclean;
1715			}
1716			ap->post_handler = NULL;
1717		}
1718noclean:
1719		/*
1720		 * Remove from the aggrprobe: this path will do nothing in
1721		 * __unregister_kprobe_bottom().
1722		 */
1723		list_del_rcu(&p->list);
1724		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1725			/*
1726			 * Try to optimize this probe again, because post
1727			 * handler may have been changed.
1728			 */
1729			optimize_kprobe(ap);
1730	}
1731	return 0;
1732
1733disarmed:
1734	BUG_ON(!kprobe_disarmed(ap));
1735	hlist_del_rcu(&ap->hlist);
1736	return 0;
1737}
1738
1739static void __unregister_kprobe_bottom(struct kprobe *p)
1740{
1741	struct kprobe *ap;
1742
1743	if (list_empty(&p->list))
1744		/* This is an independent kprobe */
1745		arch_remove_kprobe(p);
1746	else if (list_is_singular(&p->list)) {
1747		/* This is the last child of an aggrprobe */
1748		ap = list_entry(p->list.next, struct kprobe, list);
1749		list_del(&p->list);
1750		free_aggr_kprobe(ap);
1751	}
1752	/* Otherwise, do nothing. */
1753}
1754
1755int register_kprobes(struct kprobe **kps, int num)
1756{
1757	int i, ret = 0;
1758
1759	if (num <= 0)
1760		return -EINVAL;
1761	for (i = 0; i < num; i++) {
1762		ret = register_kprobe(kps[i]);
1763		if (ret < 0) {
1764			if (i > 0)
1765				unregister_kprobes(kps, i);
1766			break;
1767		}
1768	}
1769	return ret;
1770}
1771EXPORT_SYMBOL_GPL(register_kprobes);
1772
1773void unregister_kprobe(struct kprobe *p)
1774{
1775	unregister_kprobes(&p, 1);
1776}
1777EXPORT_SYMBOL_GPL(unregister_kprobe);
1778
1779void unregister_kprobes(struct kprobe **kps, int num)
1780{
1781	int i;
1782
1783	if (num <= 0)
1784		return;
1785	mutex_lock(&kprobe_mutex);
1786	for (i = 0; i < num; i++)
1787		if (__unregister_kprobe_top(kps[i]) < 0)
1788			kps[i]->addr = NULL;
1789	mutex_unlock(&kprobe_mutex);
1790
1791	synchronize_sched();
1792	for (i = 0; i < num; i++)
1793		if (kps[i]->addr)
1794			__unregister_kprobe_bottom(kps[i]);
1795}
1796EXPORT_SYMBOL_GPL(unregister_kprobes);
1797
1798int __weak kprobe_exceptions_notify(struct notifier_block *self,
1799					unsigned long val, void *data)
1800{
1801	return NOTIFY_DONE;
1802}
1803NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1804
1805static struct notifier_block kprobe_exceptions_nb = {
1806	.notifier_call = kprobe_exceptions_notify,
1807	.priority = 0x7fffffff /* we need to be notified first */
1808};
1809
1810unsigned long __weak arch_deref_entry_point(void *entry)
1811{
1812	return (unsigned long)entry;
1813}
1814
1815#if 0
1816int register_jprobes(struct jprobe **jps, int num)
1817{
1818	int ret = 0, i;
1819
1820	if (num <= 0)
1821		return -EINVAL;
1822
1823	for (i = 0; i < num; i++) {
1824		ret = register_jprobe(jps[i]);
1825
1826		if (ret < 0) {
1827			if (i > 0)
1828				unregister_jprobes(jps, i);
1829			break;
1830		}
1831	}
1832
1833	return ret;
1834}
1835EXPORT_SYMBOL_GPL(register_jprobes);
1836
1837int register_jprobe(struct jprobe *jp)
1838{
1839	unsigned long addr, offset;
1840	struct kprobe *kp = &jp->kp;
1841
1842	/*
1843	 * Verify probepoint as well as the jprobe handler are
1844	 * valid function entry points.
1845	 */
1846	addr = arch_deref_entry_point(jp->entry);
1847
1848	if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1849	    kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
1850		kp->pre_handler = setjmp_pre_handler;
1851		kp->break_handler = longjmp_break_handler;
1852		return register_kprobe(kp);
1853	}
1854
1855	return -EINVAL;
1856}
1857EXPORT_SYMBOL_GPL(register_jprobe);
1858
1859void unregister_jprobe(struct jprobe *jp)
1860{
1861	unregister_jprobes(&jp, 1);
1862}
1863EXPORT_SYMBOL_GPL(unregister_jprobe);
1864
1865void unregister_jprobes(struct jprobe **jps, int num)
1866{
1867	int i;
1868
1869	if (num <= 0)
1870		return;
1871	mutex_lock(&kprobe_mutex);
1872	for (i = 0; i < num; i++)
1873		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1874			jps[i]->kp.addr = NULL;
1875	mutex_unlock(&kprobe_mutex);
1876
1877	synchronize_sched();
1878	for (i = 0; i < num; i++) {
1879		if (jps[i]->kp.addr)
1880			__unregister_kprobe_bottom(&jps[i]->kp);
1881	}
1882}
1883EXPORT_SYMBOL_GPL(unregister_jprobes);
1884#endif
1885
1886#ifdef CONFIG_KRETPROBES
1887/*
1888 * This kprobe pre_handler is registered with every kretprobe. When probe
1889 * hits it will set up the return probe.
1890 */
1891static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1892{
1893	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1894	unsigned long hash, flags = 0;
1895	struct kretprobe_instance *ri;
1896
1897	/*
1898	 * To avoid deadlocks, prohibit return probing in NMI contexts,
1899	 * just skip the probe and increase the (inexact) 'nmissed'
1900	 * statistical counter, so that the user is informed that
1901	 * something happened:
1902	 */
1903	if (unlikely(in_nmi())) {
1904		rp->nmissed++;
1905		return 0;
1906	}
1907
1908	/* TODO: consider to only swap the RA after the last pre_handler fired */
1909	hash = hash_ptr(current, KPROBE_HASH_BITS);
1910	raw_spin_lock_irqsave(&rp->lock, flags);
1911	if (!hlist_empty(&rp->free_instances)) {
1912		ri = hlist_entry(rp->free_instances.first,
1913				struct kretprobe_instance, hlist);
1914		hlist_del(&ri->hlist);
1915		raw_spin_unlock_irqrestore(&rp->lock, flags);
1916
1917		ri->rp = rp;
1918		ri->task = current;
1919
1920		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1921			raw_spin_lock_irqsave(&rp->lock, flags);
1922			hlist_add_head(&ri->hlist, &rp->free_instances);
1923			raw_spin_unlock_irqrestore(&rp->lock, flags);
1924			return 0;
1925		}
1926
1927		arch_prepare_kretprobe(ri, regs);
1928
1929		/* XXX(hch): why is there no hlist_move_head? */
1930		INIT_HLIST_NODE(&ri->hlist);
1931		kretprobe_table_lock(hash, &flags);
1932		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1933		kretprobe_table_unlock(hash, &flags);
1934	} else {
1935		rp->nmissed++;
1936		raw_spin_unlock_irqrestore(&rp->lock, flags);
1937	}
1938	return 0;
1939}
1940NOKPROBE_SYMBOL(pre_handler_kretprobe);
1941
1942bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1943{
1944	return !offset;
1945}
1946
1947bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1948{
1949	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1950
1951	if (IS_ERR(kp_addr))
1952		return false;
1953
1954	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1955						!arch_kprobe_on_func_entry(offset))
1956		return false;
1957
1958	return true;
1959}
1960
1961int register_kretprobe(struct kretprobe *rp)
1962{
1963	int ret = 0;
1964	struct kretprobe_instance *inst;
1965	int i;
1966	void *addr;
1967
1968	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1969		return -EINVAL;
1970
1971	if (kretprobe_blacklist_size) {
1972		addr = kprobe_addr(&rp->kp);
1973		if (IS_ERR(addr))
1974			return PTR_ERR(addr);
1975
1976		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1977			if (kretprobe_blacklist[i].addr == addr)
1978				return -EINVAL;
1979		}
1980	}
1981
1982	rp->kp.pre_handler = pre_handler_kretprobe;
1983	rp->kp.post_handler = NULL;
1984	rp->kp.fault_handler = NULL;
1985	rp->kp.break_handler = NULL;
1986
1987	/* Pre-allocate memory for max kretprobe instances */
1988	if (rp->maxactive <= 0) {
1989#ifdef CONFIG_PREEMPT
1990		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1991#else
1992		rp->maxactive = num_possible_cpus();
1993#endif
1994	}
1995	raw_spin_lock_init(&rp->lock);
1996	INIT_HLIST_HEAD(&rp->free_instances);
1997	for (i = 0; i < rp->maxactive; i++) {
1998		inst = kmalloc(sizeof(struct kretprobe_instance) +
1999			       rp->data_size, GFP_KERNEL);
2000		if (inst == NULL) {
2001			free_rp_inst(rp);
2002			return -ENOMEM;
2003		}
2004		INIT_HLIST_NODE(&inst->hlist);
2005		hlist_add_head(&inst->hlist, &rp->free_instances);
2006	}
2007
2008	rp->nmissed = 0;
2009	/* Establish function entry probe point */
2010	ret = register_kprobe(&rp->kp);
2011	if (ret != 0)
2012		free_rp_inst(rp);
2013	return ret;
2014}
2015EXPORT_SYMBOL_GPL(register_kretprobe);
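/*
 * Illustrative sketch (editor's addition, not part of kprobes.c): a minimal
 * module-side use of the kretprobe API defined above.  The probed symbol
 * "do_sys_open" and the handler names are example assumptions only.
 */
#if 0
static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Called on function entry; returning non-zero skips the return probe. */
	return 0;
}

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Called when the probed function returns. */
	pr_info("probed function returned 0x%lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "do_sys_open",	/* assumed example symbol */
	.entry_handler	= my_entry_handler,
	.handler	= my_ret_handler,
	.maxactive	= 20,			/* instances pre-allocated by register_kretprobe() */
};

static int __init my_probe_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_probe_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}
#endif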
2016
2017int register_kretprobes(struct kretprobe **rps, int num)
2018{
2019	int ret = 0, i;
2020
2021	if (num <= 0)
2022		return -EINVAL;
2023	for (i = 0; i < num; i++) {
2024		ret = register_kretprobe(rps[i]);
2025		if (ret < 0) {
2026			if (i > 0)
2027				unregister_kretprobes(rps, i);
2028			break;
2029		}
2030	}
2031	return ret;
2032}
2033EXPORT_SYMBOL_GPL(register_kretprobes);
2034
2035void unregister_kretprobe(struct kretprobe *rp)
2036{
2037	unregister_kretprobes(&rp, 1);
2038}
2039EXPORT_SYMBOL_GPL(unregister_kretprobe);
2040
2041void unregister_kretprobes(struct kretprobe **rps, int num)
2042{
2043	int i;
2044
2045	if (num <= 0)
2046		return;
2047	mutex_lock(&kprobe_mutex);
2048	for (i = 0; i < num; i++)
2049		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2050			rps[i]->kp.addr = NULL;
2051	mutex_unlock(&kprobe_mutex);
2052
2053	synchronize_sched();
2054	for (i = 0; i < num; i++) {
2055		if (rps[i]->kp.addr) {
2056			__unregister_kprobe_bottom(&rps[i]->kp);
2057			cleanup_rp_inst(rps[i]);
2058		}
2059	}
2060}
2061EXPORT_SYMBOL_GPL(unregister_kretprobes);
2062
2063#else /* CONFIG_KRETPROBES */
2064int register_kretprobe(struct kretprobe *rp)
2065{
2066	return -ENOSYS;
2067}
2068EXPORT_SYMBOL_GPL(register_kretprobe);
2069
2070int register_kretprobes(struct kretprobe **rps, int num)
2071{
2072	return -ENOSYS;
2073}
2074EXPORT_SYMBOL_GPL(register_kretprobes);
2075
2076void unregister_kretprobe(struct kretprobe *rp)
2077{
2078}
2079EXPORT_SYMBOL_GPL(unregister_kretprobe);
2080
2081void unregister_kretprobes(struct kretprobe **rps, int num)
2082{
2083}
2084EXPORT_SYMBOL_GPL(unregister_kretprobes);
2085
2086static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2087{
2088	return 0;
2089}
2090NOKPROBE_SYMBOL(pre_handler_kretprobe);
2091
2092#endif /* CONFIG_KRETPROBES */
2093
2094/* Set the kprobe gone and remove its instruction buffer. */
2095static void kill_kprobe(struct kprobe *p)
2096{
2097	struct kprobe *kp;
2098
2099	p->flags |= KPROBE_FLAG_GONE;
2100	if (kprobe_aggrprobe(p)) {
2101		/*
2102		 * If this is an aggr_kprobe, we have to list all the
2103		 * chained probes and mark them GONE.
2104		 */
2105		list_for_each_entry_rcu(kp, &p->list, list)
2106			kp->flags |= KPROBE_FLAG_GONE;
2107		p->post_handler = NULL;
2108		p->break_handler = NULL;
2109		kill_optimized_kprobe(p);
2110	}
2111	/*
2112	 * Here, we can remove insn_slot safely, because no thread calls
2113	 * the original probed function (which will be freed soon) any more.
2114	 */
2115	arch_remove_kprobe(p);
2116}
2117
2118/* Disable one kprobe */
2119int disable_kprobe(struct kprobe *kp)
2120{
2121	int ret = 0;
2122	struct kprobe *p;
2123
2124	mutex_lock(&kprobe_mutex);
2125
2126	/* Disable this kprobe */
2127	p = __disable_kprobe(kp);
2128	if (IS_ERR(p))
2129		ret = PTR_ERR(p);
2130
2131	mutex_unlock(&kprobe_mutex);
2132	return ret;
2133}
2134EXPORT_SYMBOL_GPL(disable_kprobe);
2135
2136/* Enable one kprobe */
2137int enable_kprobe(struct kprobe *kp)
2138{
2139	int ret = 0;
2140	struct kprobe *p;
2141
2142	mutex_lock(&kprobe_mutex);
2143
2144	/* Check whether specified probe is valid. */
2145	p = __get_valid_kprobe(kp);
2146	if (unlikely(p == NULL)) {
2147		ret = -EINVAL;
2148		goto out;
2149	}
2150
2151	if (kprobe_gone(kp)) {
2152		/* This kprobe has gone, we can't enable it. */
2153		ret = -EINVAL;
2154		goto out;
2155	}
2156
2157	if (p != kp)
2158		kp->flags &= ~KPROBE_FLAG_DISABLED;
2159
2160	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2161		p->flags &= ~KPROBE_FLAG_DISABLED;
2162		ret = arm_kprobe(p);
2163		if (ret)
2164			p->flags |= KPROBE_FLAG_DISABLED;
2165	}
2166out:
2167	mutex_unlock(&kprobe_mutex);
2168	return ret;
2169}
2170EXPORT_SYMBOL_GPL(enable_kprobe);
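/*
 * Illustrative sketch (editor's addition, not part of kprobes.c): pausing and
 * resuming an already-registered kprobe with the two helpers above.  The
 * kprobe "my_kprobe" and its symbol are example assumptions only.
 */
#if 0
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* 0: single-step the original instruction as usual */
}

static struct kprobe my_kprobe = {
	.symbol_name	= "do_sys_open",	/* assumed example symbol */
	.pre_handler	= my_pre_handler,
};

static void pause_and_resume_probe(void)
{
	/* register_kprobe(&my_kprobe) is assumed to have succeeded earlier. */
	disable_kprobe(&my_kprobe);	/* probe stays registered, breakpoint removed */
	enable_kprobe(&my_kprobe);	/* breakpoint is put back and re-armed */
}
#endif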
2171
2172void dump_kprobe(struct kprobe *kp)
2173{
2174	printk(KERN_WARNING "Dumping kprobe:\n");
2175	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2176	       kp->symbol_name, kp->addr, kp->offset);
2177}
2178NOKPROBE_SYMBOL(dump_kprobe);
2179
2180/*
2181 * Lookup and populate the kprobe_blacklist.
2182 *
2183 * Unlike the kretprobe blacklist, we'll need to determine
2184 * the range of addresses that belongs to each blacklisted function,
2185 * since a kprobe need not necessarily be placed at the beginning
2186 * of a function.
2187 */
2188static int __init populate_kprobe_blacklist(unsigned long *start,
2189					     unsigned long *end)
2190{
2191	unsigned long *iter;
2192	struct kprobe_blacklist_entry *ent;
2193	unsigned long entry, offset = 0, size = 0;
2194
2195	for (iter = start; iter < end; iter++) {
2196		entry = arch_deref_entry_point((void *)*iter);
2197
2198		if (!kernel_text_address(entry) ||
2199		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2200			pr_err("Failed to find blacklist at %p\n",
2201				(void *)entry);
2202			continue;
2203		}
2204
2205		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2206		if (!ent)
2207			return -ENOMEM;
2208		ent->start_addr = entry;
2209		ent->end_addr = entry + size;
2210		INIT_LIST_HEAD(&ent->list);
2211		list_add_tail(&ent->list, &kprobe_blacklist);
2212	}
2213	return 0;
2214}
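/*
 * Editor's note (illustrative, not part of kprobes.c): entries end up between
 * __start_kprobe_blacklist and __stop_kprobe_blacklist via NOKPROBE_SYMBOL(),
 * e.g.
 *
 *	static int fragile_helper(void) { ... }
 *	NOKPROBE_SYMBOL(fragile_helper);
 *
 * which records the function's entry address in the _kprobe_blacklist section
 * scanned by populate_kprobe_blacklist() above.
 */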
2215
2216/* Module notifier call back, checking kprobes on the module */
2217static int kprobes_module_callback(struct notifier_block *nb,
2218				   unsigned long val, void *data)
2219{
2220	struct module *mod = data;
2221	struct hlist_head *head;
2222	struct kprobe *p;
2223	unsigned int i;
2224	int checkcore = (val == MODULE_STATE_GOING);
2225
2226	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2227		return NOTIFY_DONE;
2228
2229	/*
2230	 * When MODULE_STATE_GOING is notified, both the module's .text and
2231	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2232	 * notified, only the .init.text section will be freed. We need to
2233	 * disable kprobes which have been inserted in those sections.
2234	 */
2235	mutex_lock(&kprobe_mutex);
2236	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2237		head = &kprobe_table[i];
2238		hlist_for_each_entry_rcu(p, head, hlist)
2239			if (within_module_init((unsigned long)p->addr, mod) ||
2240			    (checkcore &&
2241			     within_module_core((unsigned long)p->addr, mod))) {
2242				/*
2243				 * The vaddr this probe is installed at will soon
2244				 * be vfreed but not synced to disk. Hence,
2245				 * disarming the breakpoint isn't needed.
2246				 *
2247				 * Note, this will also move any optimized probes
2248				 * that are pending to be removed from their
2249				 * corresponding lists to the freeing_list and
2250				 * will not be touched by the delayed
2251				 * kprobe_optimizer work handler.
2252				 */
2253				kill_kprobe(p);
2254			}
2255	}
2256	mutex_unlock(&kprobe_mutex);
2257	return NOTIFY_DONE;
2258}
2259
2260static struct notifier_block kprobe_module_nb = {
2261	.notifier_call = kprobes_module_callback,
2262	.priority = 0
2263};
2264
2265/* Markers of _kprobe_blacklist section */
2266extern unsigned long __start_kprobe_blacklist[];
2267extern unsigned long __stop_kprobe_blacklist[];
2268
2269static int __init init_kprobes(void)
2270{
2271	int i, err = 0;
2272
2273	/* FIXME allocate the probe table, currently defined statically */
2274	/* initialize all list heads */
2275	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2276		INIT_HLIST_HEAD(&kprobe_table[i]);
2277		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2278		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2279	}
2280
2281	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2282					__stop_kprobe_blacklist);
2283	if (err) {
2284		pr_err("kprobes: failed to populate blacklist: %d\n", err);
2285		pr_err("Please take care when using kprobes.\n");
2286	}
2287
2288	if (kretprobe_blacklist_size) {
2289		/* lookup the function address from its name */
2290		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2291			kretprobe_blacklist[i].addr =
2292				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2293			if (!kretprobe_blacklist[i].addr)
2294				printk("kretprobe: lookup failed: %s\n",
2295				       kretprobe_blacklist[i].name);
2296		}
2297	}
2298
2299#if defined(CONFIG_OPTPROBES)
2300#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2301	/* Init kprobe_optinsn_slots */
2302	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2303#endif
2304	/* By default, kprobes can be optimized */
2305	kprobes_allow_optimization = true;
2306#endif
2307
2308	/* By default, kprobes are armed */
2309	kprobes_all_disarmed = false;
2310
2311	err = arch_init_kprobes();
2312	if (!err)
2313		err = register_die_notifier(&kprobe_exceptions_nb);
2314	if (!err)
2315		err = register_module_notifier(&kprobe_module_nb);
2316
2317	kprobes_initialized = (err == 0);
2318
2319	if (!err)
2320		init_test_probes();
2321	return err;
2322}
2323
2324#ifdef CONFIG_DEBUG_FS
2325static void report_probe(struct seq_file *pi, struct kprobe *p,
2326		const char *sym, int offset, char *modname, struct kprobe *pp)
2327{
2328	char *kprobe_type;
2329
2330	if (p->pre_handler == pre_handler_kretprobe)
2331		kprobe_type = "r";
2332	else if (p->pre_handler == setjmp_pre_handler)
2333		kprobe_type = "j";
2334	else
2335		kprobe_type = "k";
2336
2337	if (sym)
2338		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2339			p->addr, kprobe_type, sym, offset,
2340			(modname ? modname : " "));
2341	else
2342		seq_printf(pi, "%p  %s  %p ",
2343			p->addr, kprobe_type, p->addr);
2344
2345	if (!pp)
2346		pp = p;
2347	seq_printf(pi, "%s%s%s%s\n",
2348		(kprobe_gone(p) ? "[GONE]" : ""),
2349		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2350		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2351		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2352}
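/*
 * Editor's note (illustrative): each line emitted above for
 * /sys/kernel/debug/kprobes/list looks roughly like
 *
 *	<address>  <k|r|j>  <symbol>+0x<offset>  [module]  [GONE][DISABLED][OPTIMIZED][FTRACE]
 *
 * where 'k' marks a kprobe, 'r' a kretprobe and 'j' a (historical) jprobe,
 * and the bracketed flags appear only when they apply.
 */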
2353
2354static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2355{
2356	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2357}
2358
2359static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2360{
2361	(*pos)++;
2362	if (*pos >= KPROBE_TABLE_SIZE)
2363		return NULL;
2364	return pos;
2365}
2366
2367static void kprobe_seq_stop(struct seq_file *f, void *v)
2368{
2369	/* Nothing to do */
2370}
2371
2372static int show_kprobe_addr(struct seq_file *pi, void *v)
2373{
2374	struct hlist_head *head;
2375	struct kprobe *p, *kp;
2376	const char *sym = NULL;
2377	unsigned int i = *(loff_t *) v;
2378	unsigned long offset = 0;
2379	char *modname, namebuf[KSYM_NAME_LEN];
2380
2381	head = &kprobe_table[i];
2382	preempt_disable();
2383	hlist_for_each_entry_rcu(p, head, hlist) {
2384		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2385					&offset, &modname, namebuf);
2386		if (kprobe_aggrprobe(p)) {
2387			list_for_each_entry_rcu(kp, &p->list, list)
2388				report_probe(pi, kp, sym, offset, modname, p);
2389		} else
2390			report_probe(pi, p, sym, offset, modname, NULL);
2391	}
2392	preempt_enable();
2393	return 0;
2394}
2395
2396static const struct seq_operations kprobes_seq_ops = {
2397	.start = kprobe_seq_start,
2398	.next  = kprobe_seq_next,
2399	.stop  = kprobe_seq_stop,
2400	.show  = show_kprobe_addr
2401};
2402
2403static int kprobes_open(struct inode *inode, struct file *filp)
2404{
2405	return seq_open(filp, &kprobes_seq_ops);
2406}
2407
2408static const struct file_operations debugfs_kprobes_operations = {
2409	.open           = kprobes_open,
2410	.read           = seq_read,
2411	.llseek         = seq_lseek,
2412	.release        = seq_release,
2413};
2414
2415/* kprobes/blacklist -- shows which functions can not be probed */
2416static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2417{
2418	return seq_list_start(&kprobe_blacklist, *pos);
2419}
2420
2421static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2422{
2423	return seq_list_next(v, &kprobe_blacklist, pos);
2424}
2425
2426static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2427{
2428	struct kprobe_blacklist_entry *ent =
2429		list_entry(v, struct kprobe_blacklist_entry, list);
2430
2431	seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2432		   (void *)ent->end_addr, (void *)ent->start_addr);
2433	return 0;
2434}
2435
2436static const struct seq_operations kprobe_blacklist_seq_ops = {
2437	.start = kprobe_blacklist_seq_start,
2438	.next  = kprobe_blacklist_seq_next,
2439	.stop  = kprobe_seq_stop,	/* Reuse void function */
2440	.show  = kprobe_blacklist_seq_show,
2441};
2442
2443static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2444{
2445	return seq_open(filp, &kprobe_blacklist_seq_ops);
2446}
2447
2448static const struct file_operations debugfs_kprobe_blacklist_ops = {
2449	.open           = kprobe_blacklist_open,
2450	.read           = seq_read,
2451	.llseek         = seq_lseek,
2452	.release        = seq_release,
2453};
2454
2455static int arm_all_kprobes(void)
2456{
2457	struct hlist_head *head;
2458	struct kprobe *p;
2459	unsigned int i, total = 0, errors = 0;
2460	int err, ret = 0;
2461
2462	mutex_lock(&kprobe_mutex);
2463
2464	/* If kprobes are armed, just return */
2465	if (!kprobes_all_disarmed)
2466		goto already_enabled;
2467
2468	/*
2469	 * optimize_kprobe() called by arm_kprobe() checks
2470	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2471	 * arm_kprobe.
2472	 */
2473	kprobes_all_disarmed = false;
2474	/* Arming kprobes doesn't optimize kprobe itself */
2475	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2476		head = &kprobe_table[i];
2477		/* Arm all kprobes on a best-effort basis */
2478		hlist_for_each_entry_rcu(p, head, hlist) {
2479			if (!kprobe_disabled(p)) {
2480				err = arm_kprobe(p);
2481				if (err)  {
2482					errors++;
2483					ret = err;
2484				}
2485				total++;
2486			}
2487		}
2488	}
2489
2490	if (errors)
2491		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2492			errors, total);
2493	else
2494		pr_info("Kprobes globally enabled\n");
2495
2496already_enabled:
2497	mutex_unlock(&kprobe_mutex);
2498	return ret;
2499}
2500
2501static int disarm_all_kprobes(void)
2502{
2503	struct hlist_head *head;
2504	struct kprobe *p;
2505	unsigned int i, total = 0, errors = 0;
2506	int err, ret = 0;
2507
2508	mutex_lock(&kprobe_mutex);
2509
2510	/* If kprobes are already disarmed, just return */
2511	if (kprobes_all_disarmed) {
2512		mutex_unlock(&kprobe_mutex);
2513		return 0;
2514	}
2515
2516	kprobes_all_disarmed = true;
2517
2518	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2519		head = &kprobe_table[i];
2520		/* Disarm all kprobes on a best-effort basis */
2521		hlist_for_each_entry_rcu(p, head, hlist) {
2522			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2523				err = disarm_kprobe(p, false);
2524				if (err) {
2525					errors++;
2526					ret = err;
2527				}
2528				total++;
2529			}
2530		}
2531	}
2532
2533	if (errors)
2534		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2535			errors, total);
2536	else
2537		pr_info("Kprobes globally disabled\n");
2538
2539	mutex_unlock(&kprobe_mutex);
2540
2541	/* Wait for the optimizer to finish disarming all kprobes */
2542	wait_for_kprobe_optimizer();
2543
2544	return ret;
2545}
2546
2547/*
2548 * XXX: The debugfs bool file interface doesn't allow for callbacks
2549 * when the bool state is switched. We can reuse that facility when it
2550 * becomes available.
2551 */
2552static ssize_t read_enabled_file_bool(struct file *file,
2553	       char __user *user_buf, size_t count, loff_t *ppos)
2554{
2555	char buf[3];
2556
2557	if (!kprobes_all_disarmed)
2558		buf[0] = '1';
2559	else
2560		buf[0] = '0';
2561	buf[1] = '\n';
2562	buf[2] = 0x00;
2563	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2564}
2565
2566static ssize_t write_enabled_file_bool(struct file *file,
2567	       const char __user *user_buf, size_t count, loff_t *ppos)
2568{
2569	char buf[32];
2570	size_t buf_size;
2571	int ret = 0;
2572
2573	buf_size = min(count, (sizeof(buf)-1));
2574	if (copy_from_user(buf, user_buf, buf_size))
2575		return -EFAULT;
2576
2577	buf[buf_size] = '\0';
2578	switch (buf[0]) {
2579	case 'y':
2580	case 'Y':
2581	case '1':
2582		ret = arm_all_kprobes();
2583		break;
2584	case 'n':
2585	case 'N':
2586	case '0':
2587		ret = disarm_all_kprobes();
2588		break;
2589	default:
2590		return -EINVAL;
2591	}
2592
2593	if (ret)
2594		return ret;
2595
2596	return count;
2597}
2598
2599static const struct file_operations fops_kp = {
2600	.read =         read_enabled_file_bool,
2601	.write =        write_enabled_file_bool,
2602	.llseek =	default_llseek,
2603};
2604
2605static int __init debugfs_kprobe_init(void)
2606{
2607	struct dentry *dir, *file;
2608	unsigned int value = 1;
2609
2610	dir = debugfs_create_dir("kprobes", NULL);
2611	if (!dir)
2612		return -ENOMEM;
2613
2614	file = debugfs_create_file("list", 0444, dir, NULL,
2615				&debugfs_kprobes_operations);
2616	if (!file)
2617		goto error;
2618
2619	file = debugfs_create_file("enabled", 0600, dir,
2620					&value, &fops_kp);
2621	if (!file)
2622		goto error;
2623
2624	file = debugfs_create_file("blacklist", 0444, dir, NULL,
2625				&debugfs_kprobe_blacklist_ops);
2626	if (!file)
2627		goto error;
2628
2629	return 0;
2630
2631error:
2632	debugfs_remove(dir);
2633	return -ENOMEM;
2634}
2635
2636late_initcall(debugfs_kprobe_init);
2637#endif /* CONFIG_DEBUG_FS */
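/*
 * Editor's note (illustrative, not part of kprobes.c): with CONFIG_DEBUG_FS
 * enabled, the files created above are typically exercised from the shell:
 *
 *	cat /sys/kernel/debug/kprobes/list		# installed probes
 *	cat /sys/kernel/debug/kprobes/blacklist		# unprobeable ranges
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all kprobes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 *
 * The debugfs mount point may differ on a given system.
 */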
2638
2639module_init(init_kprobes);
2640
2641/* defined in arch/.../kernel/kprobes.c */
2642EXPORT_SYMBOL_GPL(jprobe_return);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Kernel Probes (KProbes)
   4 *  kernel/kprobes.c
   5 *
   6 * Copyright (C) IBM Corporation, 2002, 2004
   7 *
   8 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
   9 *		Probes initial implementation (includes suggestions from
  10 *		Rusty Russell).
  11 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  12 *		hlists and exceptions notifier as suggested by Andi Kleen.
  13 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  14 *		interface to access function arguments.
  15 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  16 *		exceptions notifier to be first on the priority list.
  17 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  18 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  19 *		<prasanna@in.ibm.com> added function-return probes.
  20 */
  21#include <linux/kprobes.h>
  22#include <linux/hash.h>
  23#include <linux/init.h>
  24#include <linux/slab.h>
  25#include <linux/stddef.h>
  26#include <linux/export.h>
  27#include <linux/moduleloader.h>
  28#include <linux/kallsyms.h>
  29#include <linux/freezer.h>
  30#include <linux/seq_file.h>
  31#include <linux/debugfs.h>
  32#include <linux/sysctl.h>
  33#include <linux/kdebug.h>
  34#include <linux/memory.h>
  35#include <linux/ftrace.h>
  36#include <linux/cpu.h>
  37#include <linux/jump_label.h>
  38
  39#include <asm/sections.h>
  40#include <asm/cacheflush.h>
  41#include <asm/errno.h>
  42#include <linux/uaccess.h>
  43
  44#define KPROBE_HASH_BITS 6
  45#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  46
  47
  48static int kprobes_initialized;
  49static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  50static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  51
  52/* NOTE: change this value only with kprobe_mutex held */
  53static bool kprobes_all_disarmed;
  54
  55/* This protects kprobe_table and optimizing_list */
  56static DEFINE_MUTEX(kprobe_mutex);
  57static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  58static struct {
  59	raw_spinlock_t lock ____cacheline_aligned_in_smp;
  60} kretprobe_table_locks[KPROBE_TABLE_SIZE];
  61
  62kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
  63					unsigned int __unused)
  64{
  65	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
  66}
  67
  68static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  69{
  70	return &(kretprobe_table_locks[hash].lock);
  71}
  72
  73/* Blacklist -- list of struct kprobe_blacklist_entry */
  74static LIST_HEAD(kprobe_blacklist);
  75
  76#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  77/*
  78 * kprobe->ainsn.insn points to the copy of the instruction to be
  79 * single-stepped. x86_64, POWER4 and above have no-exec support and
  80 * stepping on the instruction on a vmalloced/kmalloced/data page
  81 * is a recipe for disaster
  82 */
  83struct kprobe_insn_page {
  84	struct list_head list;
  85	kprobe_opcode_t *insns;		/* Page of instruction slots */
  86	struct kprobe_insn_cache *cache;
  87	int nused;
  88	int ngarbage;
  89	char slot_used[];
  90};
  91
  92#define KPROBE_INSN_PAGE_SIZE(slots)			\
  93	(offsetof(struct kprobe_insn_page, slot_used) +	\
  94	 (sizeof(char) * (slots)))
  95
  96static int slots_per_page(struct kprobe_insn_cache *c)
  97{
  98	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  99}
 100
 101enum kprobe_slot_state {
 102	SLOT_CLEAN = 0,
 103	SLOT_DIRTY = 1,
 104	SLOT_USED = 2,
 105};
 106
 107void __weak *alloc_insn_page(void)
 108{
 109	return module_alloc(PAGE_SIZE);
 110}
 111
 112void __weak free_insn_page(void *page)
 113{
 114	module_memfree(page);
 115}
 116
 117struct kprobe_insn_cache kprobe_insn_slots = {
 118	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
 119	.alloc = alloc_insn_page,
 120	.free = free_insn_page,
 121	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 122	.insn_size = MAX_INSN_SIZE,
 123	.nr_garbage = 0,
 124};
 125static int collect_garbage_slots(struct kprobe_insn_cache *c);
 126
 127/**
 128 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 129 * We allocate an executable page if there's no room on existing ones.
 130 */
 131kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 132{
 133	struct kprobe_insn_page *kip;
 134	kprobe_opcode_t *slot = NULL;
 135
 136	/* Since the slot array is not protected by rcu, we need a mutex */
 137	mutex_lock(&c->mutex);
 138 retry:
 139	rcu_read_lock();
 140	list_for_each_entry_rcu(kip, &c->pages, list) {
 141		if (kip->nused < slots_per_page(c)) {
 142			int i;
 143			for (i = 0; i < slots_per_page(c); i++) {
 144				if (kip->slot_used[i] == SLOT_CLEAN) {
 145					kip->slot_used[i] = SLOT_USED;
 146					kip->nused++;
 147					slot = kip->insns + (i * c->insn_size);
 148					rcu_read_unlock();
 149					goto out;
 150				}
 151			}
 152			/* kip->nused is broken. Fix it. */
 153			kip->nused = slots_per_page(c);
 154			WARN_ON(1);
 155		}
 156	}
 157	rcu_read_unlock();
 158
  159	/* If there are any garbage slots, collect them and try again. */
 160	if (c->nr_garbage && collect_garbage_slots(c) == 0)
 161		goto retry;
 162
 163	/* All out of space.  Need to allocate a new page. */
 164	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 165	if (!kip)
 166		goto out;
 167
 168	/*
 169	 * Use module_alloc so this page is within +/- 2GB of where the
 170	 * kernel image and loaded module images reside. This is required
 171	 * so x86_64 can correctly handle the %rip-relative fixups.
 172	 */
 173	kip->insns = c->alloc();
 174	if (!kip->insns) {
 175		kfree(kip);
 176		goto out;
 177	}
 178	INIT_LIST_HEAD(&kip->list);
 179	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 180	kip->slot_used[0] = SLOT_USED;
 181	kip->nused = 1;
 182	kip->ngarbage = 0;
 183	kip->cache = c;
 184	list_add_rcu(&kip->list, &c->pages);
 185	slot = kip->insns;
 186out:
 187	mutex_unlock(&c->mutex);
 188	return slot;
 189}
 190
  191/* Return 1 if all garbage slots are collected, otherwise 0. */
 192static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
 193{
 194	kip->slot_used[idx] = SLOT_CLEAN;
 195	kip->nused--;
 196	if (kip->nused == 0) {
 197		/*
 198		 * Page is no longer in use.  Free it unless
 199		 * it's the last one.  We keep the last one
 200		 * so as not to have to set it up again the
 201		 * next time somebody inserts a probe.
 202		 */
 203		if (!list_is_singular(&kip->list)) {
 204			list_del_rcu(&kip->list);
 205			synchronize_rcu();
 206			kip->cache->free(kip->insns);
 207			kfree(kip);
 208		}
 209		return 1;
 210	}
 211	return 0;
 212}
 213
 214static int collect_garbage_slots(struct kprobe_insn_cache *c)
 215{
 216	struct kprobe_insn_page *kip, *next;
 217
  218	/* Ensure no-one is interrupted while on the garbage slots */
 219	synchronize_rcu();
 220
 221	list_for_each_entry_safe(kip, next, &c->pages, list) {
 222		int i;
 223		if (kip->ngarbage == 0)
 224			continue;
 225		kip->ngarbage = 0;	/* we will collect all garbages */
 226		for (i = 0; i < slots_per_page(c); i++) {
 227			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
 228				break;
 229		}
 230	}
 231	c->nr_garbage = 0;
 232	return 0;
 233}
 234
 235void __free_insn_slot(struct kprobe_insn_cache *c,
 236		      kprobe_opcode_t *slot, int dirty)
 237{
 238	struct kprobe_insn_page *kip;
 239	long idx;
 240
 241	mutex_lock(&c->mutex);
 242	rcu_read_lock();
 243	list_for_each_entry_rcu(kip, &c->pages, list) {
 244		idx = ((long)slot - (long)kip->insns) /
 245			(c->insn_size * sizeof(kprobe_opcode_t));
 246		if (idx >= 0 && idx < slots_per_page(c))
 247			goto out;
 248	}
 249	/* Could not find this slot. */
 250	WARN_ON(1);
 251	kip = NULL;
 252out:
 253	rcu_read_unlock();
 254	/* Mark and sweep: this may sleep */
 255	if (kip) {
 256		/* Check double free */
 257		WARN_ON(kip->slot_used[idx] != SLOT_USED);
 258		if (dirty) {
 259			kip->slot_used[idx] = SLOT_DIRTY;
 260			kip->ngarbage++;
 261			if (++c->nr_garbage > slots_per_page(c))
 262				collect_garbage_slots(c);
 263		} else {
 264			collect_one_slot(kip, idx);
 265		}
 266	}
 267	mutex_unlock(&c->mutex);
 268}
 269
 270/*
  271 * Check whether the given address is on a page of kprobe instruction slots.
  272 * This is used for checking whether an address found on a stack
  273 * is in a text area or not.
 274 */
 275bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
 276{
 277	struct kprobe_insn_page *kip;
 278	bool ret = false;
 279
 280	rcu_read_lock();
 281	list_for_each_entry_rcu(kip, &c->pages, list) {
 282		if (addr >= (unsigned long)kip->insns &&
 283		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
 284			ret = true;
 285			break;
 286		}
 287	}
 288	rcu_read_unlock();
 289
 290	return ret;
 291}
 292
 293#ifdef CONFIG_OPTPROBES
 294/* For optimized_kprobe buffer */
 295struct kprobe_insn_cache kprobe_optinsn_slots = {
 296	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
 297	.alloc = alloc_insn_page,
 298	.free = free_insn_page,
 299	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 300	/* .insn_size is initialized later */
 301	.nr_garbage = 0,
 302};
 303#endif
 304#endif
 305
  306/* We have preemption disabled, so it is safe to use __ versions */
 307static inline void set_kprobe_instance(struct kprobe *kp)
 308{
 309	__this_cpu_write(kprobe_instance, kp);
 310}
 311
 312static inline void reset_kprobe_instance(void)
 313{
 314	__this_cpu_write(kprobe_instance, NULL);
 315}
 316
 317/*
 318 * This routine is called either:
 319 * 	- under the kprobe_mutex - during kprobe_[un]register()
 320 * 				OR
 321 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 322 */
 323struct kprobe *get_kprobe(void *addr)
 324{
 325	struct hlist_head *head;
 326	struct kprobe *p;
 327
 328	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 329	hlist_for_each_entry_rcu(p, head, hlist) {
 330		if (p->addr == addr)
 331			return p;
 332	}
 333
 334	return NULL;
 335}
 336NOKPROBE_SYMBOL(get_kprobe);
 337
 338static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 339
 340/* Return true if the kprobe is an aggregator */
 341static inline int kprobe_aggrprobe(struct kprobe *p)
 342{
 343	return p->pre_handler == aggr_pre_handler;
 344}
 345
 346/* Return true(!0) if the kprobe is unused */
 347static inline int kprobe_unused(struct kprobe *p)
 348{
 349	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
 350	       list_empty(&p->list);
 351}
 352
 353/*
 354 * Keep all fields in the kprobe consistent
 355 */
 356static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
 357{
 358	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
 359	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
 360}
 361
 362#ifdef CONFIG_OPTPROBES
 363/* NOTE: change this value only with kprobe_mutex held */
 364static bool kprobes_allow_optimization;
 365
 366/*
  367 * Call all pre_handlers on the list, but ignore their return values.
  368 * This must be called from the arch-dependent optimized caller.
 369 */
 370void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 371{
 372	struct kprobe *kp;
 373
 374	list_for_each_entry_rcu(kp, &p->list, list) {
 375		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 376			set_kprobe_instance(kp);
 377			kp->pre_handler(kp, regs);
 378		}
 379		reset_kprobe_instance();
 380	}
 381}
 382NOKPROBE_SYMBOL(opt_pre_handler);
 383
 384/* Free optimized instructions and optimized_kprobe */
 385static void free_aggr_kprobe(struct kprobe *p)
 386{
 387	struct optimized_kprobe *op;
 388
 389	op = container_of(p, struct optimized_kprobe, kp);
 390	arch_remove_optimized_kprobe(op);
 391	arch_remove_kprobe(p);
 392	kfree(op);
 393}
 394
 395/* Return true(!0) if the kprobe is ready for optimization. */
 396static inline int kprobe_optready(struct kprobe *p)
 397{
 398	struct optimized_kprobe *op;
 399
 400	if (kprobe_aggrprobe(p)) {
 401		op = container_of(p, struct optimized_kprobe, kp);
 402		return arch_prepared_optinsn(&op->optinsn);
 403	}
 404
 405	return 0;
 406}
 407
 408/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
 409static inline int kprobe_disarmed(struct kprobe *p)
 410{
 411	struct optimized_kprobe *op;
 412
  413	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
 414	if (!kprobe_aggrprobe(p))
 415		return kprobe_disabled(p);
 416
 417	op = container_of(p, struct optimized_kprobe, kp);
 418
 419	return kprobe_disabled(p) && list_empty(&op->list);
 420}
 421
 422/* Return true(!0) if the probe is queued on (un)optimizing lists */
 423static int kprobe_queued(struct kprobe *p)
 424{
 425	struct optimized_kprobe *op;
 426
 427	if (kprobe_aggrprobe(p)) {
 428		op = container_of(p, struct optimized_kprobe, kp);
 429		if (!list_empty(&op->list))
 430			return 1;
 431	}
 432	return 0;
 433}
 434
 435/*
 436 * Return an optimized kprobe whose optimizing code replaces
  437 * instructions including addr (excluding the breakpoint).
 438 */
 439static struct kprobe *get_optimized_kprobe(unsigned long addr)
 440{
 441	int i;
 442	struct kprobe *p = NULL;
 443	struct optimized_kprobe *op;
 444
 445	/* Don't check i == 0, since that is a breakpoint case. */
 446	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
 447		p = get_kprobe((void *)(addr - i));
 448
 449	if (p && kprobe_optready(p)) {
 450		op = container_of(p, struct optimized_kprobe, kp);
 451		if (arch_within_optimized_kprobe(op, addr))
 452			return p;
 453	}
 454
 455	return NULL;
 456}
 457
 458/* Optimization staging list, protected by kprobe_mutex */
 459static LIST_HEAD(optimizing_list);
 460static LIST_HEAD(unoptimizing_list);
 461static LIST_HEAD(freeing_list);
 462
 463static void kprobe_optimizer(struct work_struct *work);
 464static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 465#define OPTIMIZE_DELAY 5
 466
 467/*
 468 * Optimize (replace a breakpoint with a jump) kprobes listed on
 469 * optimizing_list.
 470 */
 471static void do_optimize_kprobes(void)
 472{
 473	lockdep_assert_held(&text_mutex);
 474	/*
  475	 * Optimization/unoptimization refers to online_cpus via
  476	 * stop_machine(), while cpu-hotplug modifies online_cpus.
  477	 * At the same time, text_mutex is held by both cpu-hotplug and here.
  478	 * This combination can cause a deadlock (cpu-hotplug tries to lock
  479	 * text_mutex but stop_machine() cannot proceed because online_cpus
  480	 * has changed).
  481	 * To avoid this deadlock, the caller must have locked cpu hotplug,
  482	 * preventing cpu-hotplug from running outside of text_mutex locking.
 483	 */
 484	lockdep_assert_cpus_held();
 485
  486	/* Optimization is never done when kprobes are disarmed */
 487	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
 488	    list_empty(&optimizing_list))
 489		return;
  490
  491	arch_optimize_kprobes(&optimizing_list);
 492}
 493
 494/*
 495 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  496 * if needed) kprobes listed on unoptimizing_list.
 497 */
 498static void do_unoptimize_kprobes(void)
 499{
 500	struct optimized_kprobe *op, *tmp;
 501
 502	lockdep_assert_held(&text_mutex);
 503	/* See comment in do_optimize_kprobes() */
 504	lockdep_assert_cpus_held();
 505
 506	/* Unoptimization must be done anytime */
 507	if (list_empty(&unoptimizing_list))
 508		return;
  509
 510	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 511	/* Loop free_list for disarming */
 512	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 513		/* Disarm probes if marked disabled */
 514		if (kprobe_disabled(&op->kp))
 515			arch_disarm_kprobe(&op->kp);
 516		if (kprobe_unused(&op->kp)) {
 517			/*
 518			 * Remove unused probes from hash list. After waiting
 519			 * for synchronization, these probes are reclaimed.
 520			 * (reclaiming is done by do_free_cleaned_kprobes.)
 521			 */
 522			hlist_del_rcu(&op->kp.hlist);
 523		} else
 524			list_del_init(&op->list);
  525	}
 526}
 527
 528/* Reclaim all kprobes on the free_list */
 529static void do_free_cleaned_kprobes(void)
 530{
 531	struct optimized_kprobe *op, *tmp;
 532
  533	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 534		list_del_init(&op->list);
 535		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
 536			/*
 537			 * This must not happen, but if there is a kprobe
 538			 * still in use, keep it on kprobes hash list.
 539			 */
 540			continue;
 541		}
 542		free_aggr_kprobe(&op->kp);
 543	}
 544}
 545
 546/* Start optimizer after OPTIMIZE_DELAY passed */
 547static void kick_kprobe_optimizer(void)
 548{
 549	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 550}
 551
 552/* Kprobe jump optimizer */
 553static void kprobe_optimizer(struct work_struct *work)
 554{
 555	mutex_lock(&kprobe_mutex);
 556	cpus_read_lock();
 557	mutex_lock(&text_mutex);
 558	/* Lock modules while optimizing kprobes */
 559	mutex_lock(&module_mutex);
 560
 561	/*
 562	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
  563	 * kprobes before waiting for the quiescence period.
 564	 */
 565	do_unoptimize_kprobes();
 566
 567	/*
  568	 * Step 2: Wait for a quiescence period to ensure that all potentially
  569	 * preempted tasks have been scheduled normally. Because an optprobe
  570	 * may modify multiple instructions, there is a chance that the Nth
  571	 * instruction is preempted. In that case, such tasks can return
  572	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
  573	 * Note that on a non-preemptive kernel, this is transparently converted
  574	 * to synchronize_sched() to wait for all interrupts to have completed.
 575	 */
 576	synchronize_rcu_tasks();
 577
  578	/* Step 3: Optimize kprobes after the quiescence period */
 579	do_optimize_kprobes();
 580
  581	/* Step 4: Free cleaned kprobes after the quiescence period */
 582	do_free_cleaned_kprobes();
 583
 584	mutex_unlock(&module_mutex);
 585	mutex_unlock(&text_mutex);
 586	cpus_read_unlock();
 587	mutex_unlock(&kprobe_mutex);
 588
 589	/* Step 5: Kick optimizer again if needed */
 590	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 591		kick_kprobe_optimizer();
 592}
 593
 594/* Wait for completing optimization and unoptimization */
 595void wait_for_kprobe_optimizer(void)
 596{
 597	mutex_lock(&kprobe_mutex);
 598
 599	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 600		mutex_unlock(&kprobe_mutex);
 601
  602		/* this will also make optimizing_work execute immediately */
 603		flush_delayed_work(&optimizing_work);
 604		/* @optimizing_work might not have been queued yet, relax */
 605		cpu_relax();
 606
 607		mutex_lock(&kprobe_mutex);
 608	}
 609
 610	mutex_unlock(&kprobe_mutex);
 611}
 612
 613/* Optimize kprobe if p is ready to be optimized */
 614static void optimize_kprobe(struct kprobe *p)
 615{
 616	struct optimized_kprobe *op;
 617
 618	/* Check if the kprobe is disabled or not ready for optimization. */
 619	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 620	    (kprobe_disabled(p) || kprobes_all_disarmed))
 621		return;
 622
 623	/* kprobes with post_handler can not be optimized */
 624	if (p->post_handler)
 625		return;
 626
 627	op = container_of(p, struct optimized_kprobe, kp);
 628
  629	/* Check that there are no other kprobes at the optimized instructions */
 630	if (arch_check_optimized_kprobe(op) < 0)
 631		return;
 632
 633	/* Check if it is already optimized. */
 634	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
 635		return;
 636	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 637
 638	if (!list_empty(&op->list))
 639		/* This is under unoptimizing. Just dequeue the probe */
 640		list_del_init(&op->list);
 641	else {
 642		list_add(&op->list, &optimizing_list);
 643		kick_kprobe_optimizer();
 644	}
 645}
 646
 647/* Short cut to direct unoptimizing */
 648static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 649{
 650	lockdep_assert_cpus_held();
 651	arch_unoptimize_kprobe(op);
 652	if (kprobe_disabled(&op->kp))
 653		arch_disarm_kprobe(&op->kp);
 654}
 655
 656/* Unoptimize a kprobe if p is optimized */
 657static void unoptimize_kprobe(struct kprobe *p, bool force)
 658{
 659	struct optimized_kprobe *op;
 660
 661	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
 662		return; /* This is not an optprobe nor optimized */
 663
 664	op = container_of(p, struct optimized_kprobe, kp);
 665	if (!kprobe_optimized(p)) {
 666		/* Unoptimized or unoptimizing case */
 667		if (force && !list_empty(&op->list)) {
 668			/*
  669			 * Only if this is an unoptimizing kprobe and it is forced,
  670			 * forcibly unoptimize it. (There is no need to unoptimize
  671			 * an already-unoptimized kprobe again :)
 672			 */
 673			list_del_init(&op->list);
 674			force_unoptimize_kprobe(op);
 675		}
 676		return;
 677	}
 678
 679	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 680	if (!list_empty(&op->list)) {
 681		/* Dequeue from the optimization queue */
 682		list_del_init(&op->list);
 683		return;
 684	}
 685	/* Optimized kprobe case */
 686	if (force)
 687		/* Forcibly update the code: this is a special case */
 688		force_unoptimize_kprobe(op);
 689	else {
 690		list_add(&op->list, &unoptimizing_list);
 691		kick_kprobe_optimizer();
 692	}
 693}
 694
 695/* Cancel unoptimizing for reusing */
 696static int reuse_unused_kprobe(struct kprobe *ap)
 697{
 698	struct optimized_kprobe *op;
  699
 700	/*
  701	 * An unused kprobe MUST be on its way to delayed unoptimizing (meaning
  702	 * there is still a relative jump) and disabled.
 703	 */
 704	op = container_of(ap, struct optimized_kprobe, kp);
  705	WARN_ON_ONCE(list_empty(&op->list));
 706	/* Enable the probe again */
 707	ap->flags &= ~KPROBE_FLAG_DISABLED;
 708	/* Optimize it again (remove from op->list) */
 709	if (!kprobe_optready(ap))
 710		return -EINVAL;
 711
 712	optimize_kprobe(ap);
 713	return 0;
 714}
 715
 716/* Remove optimized instructions */
 717static void kill_optimized_kprobe(struct kprobe *p)
 718{
 719	struct optimized_kprobe *op;
 720
 721	op = container_of(p, struct optimized_kprobe, kp);
 722	if (!list_empty(&op->list))
 723		/* Dequeue from the (un)optimization queue */
 724		list_del_init(&op->list);
 725	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 726
 727	if (kprobe_unused(p)) {
 728		/* Enqueue if it is unused */
 729		list_add(&op->list, &freeing_list);
 730		/*
 731		 * Remove unused probes from the hash list. After waiting
 732		 * for synchronization, this probe is reclaimed.
 733		 * (reclaiming is done by do_free_cleaned_kprobes().)
 734		 */
 735		hlist_del_rcu(&op->kp.hlist);
 736	}
 737
 738	/* Don't touch the code, because it is already freed. */
 739	arch_remove_optimized_kprobe(op);
 740}
 741
 742static inline
 743void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 744{
 745	if (!kprobe_ftrace(p))
 746		arch_prepare_optimized_kprobe(op, p);
 747}
 748
 749/* Try to prepare optimized instructions */
 750static void prepare_optimized_kprobe(struct kprobe *p)
 751{
 752	struct optimized_kprobe *op;
 753
 754	op = container_of(p, struct optimized_kprobe, kp);
 755	__prepare_optimized_kprobe(op, p);
 756}
 757
 758/* Allocate new optimized_kprobe and try to prepare optimized instructions */
 759static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 760{
 761	struct optimized_kprobe *op;
 762
 763	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
 764	if (!op)
 765		return NULL;
 766
 767	INIT_LIST_HEAD(&op->list);
 768	op->kp.addr = p->addr;
 769	__prepare_optimized_kprobe(op, p);
 770
 771	return &op->kp;
 772}
 773
 774static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
 775
 776/*
 777 * Prepare an optimized_kprobe and optimize it
 778 * NOTE: p must be a normal registered kprobe
 779 */
 780static void try_to_optimize_kprobe(struct kprobe *p)
 781{
 782	struct kprobe *ap;
 783	struct optimized_kprobe *op;
 784
 785	/* Impossible to optimize ftrace-based kprobe */
 786	if (kprobe_ftrace(p))
 787		return;
 788
 789	/* For preparing optimization, jump_label_text_reserved() is called */
 790	cpus_read_lock();
 791	jump_label_lock();
 792	mutex_lock(&text_mutex);
 793
 794	ap = alloc_aggr_kprobe(p);
 795	if (!ap)
 796		goto out;
 797
 798	op = container_of(ap, struct optimized_kprobe, kp);
 799	if (!arch_prepared_optinsn(&op->optinsn)) {
 800		/* If failed to setup optimizing, fallback to kprobe */
 801		arch_remove_optimized_kprobe(op);
 802		kfree(op);
 803		goto out;
 804	}
 805
 806	init_aggr_kprobe(ap, p);
 807	optimize_kprobe(ap);	/* This just kicks optimizer thread */
 808
 809out:
 810	mutex_unlock(&text_mutex);
 811	jump_label_unlock();
 812	cpus_read_unlock();
 813}
 814
 815#ifdef CONFIG_SYSCTL
 816static void optimize_all_kprobes(void)
 817{
 818	struct hlist_head *head;
 819	struct kprobe *p;
 820	unsigned int i;
 821
 822	mutex_lock(&kprobe_mutex);
 823	/* If optimization is already allowed, just return */
 824	if (kprobes_allow_optimization)
 825		goto out;
 826
 827	cpus_read_lock();
 828	kprobes_allow_optimization = true;
 829	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 830		head = &kprobe_table[i];
 831		hlist_for_each_entry_rcu(p, head, hlist)
 832			if (!kprobe_disabled(p))
 833				optimize_kprobe(p);
 834	}
 835	cpus_read_unlock();
 836	printk(KERN_INFO "Kprobes globally optimized\n");
 837out:
 838	mutex_unlock(&kprobe_mutex);
 839}
 840
 841static void unoptimize_all_kprobes(void)
 842{
 843	struct hlist_head *head;
 844	struct kprobe *p;
 845	unsigned int i;
 846
 847	mutex_lock(&kprobe_mutex);
 848	/* If optimization is already prohibited, just return */
 849	if (!kprobes_allow_optimization) {
 850		mutex_unlock(&kprobe_mutex);
 851		return;
 852	}
 853
 854	cpus_read_lock();
 855	kprobes_allow_optimization = false;
 856	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 857		head = &kprobe_table[i];
 858		hlist_for_each_entry_rcu(p, head, hlist) {
 859			if (!kprobe_disabled(p))
 860				unoptimize_kprobe(p, false);
 861		}
 862	}
 863	cpus_read_unlock();
 864	mutex_unlock(&kprobe_mutex);
 865
 866	/* Wait for unoptimizing completion */
 867	wait_for_kprobe_optimizer();
 868	printk(KERN_INFO "Kprobes globally unoptimized\n");
 869}
 870
 871static DEFINE_MUTEX(kprobe_sysctl_mutex);
 872int sysctl_kprobes_optimization;
 873int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 874				      void __user *buffer, size_t *length,
 875				      loff_t *ppos)
 876{
 877	int ret;
 878
 879	mutex_lock(&kprobe_sysctl_mutex);
 880	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 881	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 882
 883	if (sysctl_kprobes_optimization)
 884		optimize_all_kprobes();
 885	else
 886		unoptimize_all_kprobes();
 887	mutex_unlock(&kprobe_sysctl_mutex);
 888
 889	return ret;
 890}
 891#endif /* CONFIG_SYSCTL */
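/*
 * Editor's note (illustrative, not part of kprobes.c): the handler above
 * backs the debug.kprobes-optimization sysctl, e.g.
 *
 *	sysctl -w debug.kprobes-optimization=0	# fall back to breakpoint-based kprobes
 *	sysctl -w debug.kprobes-optimization=1	# allow jump optimization again
 */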
 892
 893/* Put a breakpoint for a probe. Must be called with text_mutex locked */
 894static void __arm_kprobe(struct kprobe *p)
 895{
 896	struct kprobe *_p;
 897
 898	/* Check collision with other optimized kprobes */
 899	_p = get_optimized_kprobe((unsigned long)p->addr);
 900	if (unlikely(_p))
 901		/* Fallback to unoptimized kprobe */
 902		unoptimize_kprobe(_p, true);
 903
 904	arch_arm_kprobe(p);
 905	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
 906}
 907
 908/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
 909static void __disarm_kprobe(struct kprobe *p, bool reopt)
 910{
 911	struct kprobe *_p;
 912
 913	/* Try to unoptimize */
 914	unoptimize_kprobe(p, kprobes_all_disarmed);
 915
 916	if (!kprobe_queued(p)) {
 917		arch_disarm_kprobe(p);
 918		/* If another kprobe was blocked, optimize it. */
 919		_p = get_optimized_kprobe((unsigned long)p->addr);
 920		if (unlikely(_p) && reopt)
 921			optimize_kprobe(_p);
 922	}
  923	/* TODO: reoptimize others after unoptimizing this probe */
 924}
 925
 926#else /* !CONFIG_OPTPROBES */
 927
 928#define optimize_kprobe(p)			do {} while (0)
 929#define unoptimize_kprobe(p, f)			do {} while (0)
 930#define kill_optimized_kprobe(p)		do {} while (0)
 931#define prepare_optimized_kprobe(p)		do {} while (0)
 932#define try_to_optimize_kprobe(p)		do {} while (0)
 933#define __arm_kprobe(p)				arch_arm_kprobe(p)
 934#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
 935#define kprobe_disarmed(p)			kprobe_disabled(p)
 936#define wait_for_kprobe_optimizer()		do {} while (0)
 937
  938static int reuse_unused_kprobe(struct kprobe *ap)
 939{
 940	/*
  941	 * If optimized kprobes are NOT supported, the aggr kprobe is
  942	 * released at the same time that the last aggregated kprobe is
  943	 * unregistered.
  944	 * Thus there should be no chance to reuse an unused kprobe.
 945	 */
 946	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
 947	return -EINVAL;
 948}
 949
 950static void free_aggr_kprobe(struct kprobe *p)
 951{
 952	arch_remove_kprobe(p);
 953	kfree(p);
 954}
 955
 956static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 957{
 958	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 959}
 960#endif /* CONFIG_OPTPROBES */
 961
 962#ifdef CONFIG_KPROBES_ON_FTRACE
 963static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 964	.func = kprobe_ftrace_handler,
 965	.flags = FTRACE_OPS_FL_SAVE_REGS,
 966};
 967
 968static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
 969	.func = kprobe_ftrace_handler,
 970	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 971};
 972
 973static int kprobe_ipmodify_enabled;
 974static int kprobe_ftrace_enabled;
 975
 976/* Must ensure p->addr is really on ftrace */
 977static int prepare_kprobe(struct kprobe *p)
 978{
 979	if (!kprobe_ftrace(p))
 980		return arch_prepare_kprobe(p);
 981
 982	return arch_prepare_kprobe_ftrace(p);
 983}
 984
 985/* Caller must lock kprobe_mutex */
 986static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
 987			       int *cnt)
 988{
 989	int ret = 0;
 990
  991	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
 992	if (ret) {
 993		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
 994			 p->addr, ret);
 995		return ret;
 996	}
 997
 998	if (*cnt == 0) {
 999		ret = register_ftrace_function(ops);
1000		if (ret) {
1001			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
1002			goto err_ftrace;
1003		}
1004	}
1005
1006	(*cnt)++;
1007	return ret;
1008
1009err_ftrace:
1010	/*
 1011	 * At this point, since ops is not registered, we should be safe from
 1012	 * registering an empty filter.
1013	 */
1014	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1015	return ret;
1016}
1017
1018static int arm_kprobe_ftrace(struct kprobe *p)
1019{
1020	bool ipmodify = (p->post_handler != NULL);
1021
1022	return __arm_kprobe_ftrace(p,
1023		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1024		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1025}
1026
1027/* Caller must lock kprobe_mutex */
1028static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1029				  int *cnt)
1030{
1031	int ret = 0;
1032
1033	if (*cnt == 1) {
1034		ret = unregister_ftrace_function(ops);
1035		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1036			return ret;
1037	}
1038
1039	(*cnt)--;
1040
1041	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1042	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
1043		  p->addr, ret);
1044	return ret;
1045}
1046
1047static int disarm_kprobe_ftrace(struct kprobe *p)
1048{
1049	bool ipmodify = (p->post_handler != NULL);
1050
1051	return __disarm_kprobe_ftrace(p,
1052		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1053		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1054}
1055#else	/* !CONFIG_KPROBES_ON_FTRACE */
1056#define prepare_kprobe(p)	arch_prepare_kprobe(p)
1057#define arm_kprobe_ftrace(p)	(-ENODEV)
1058#define disarm_kprobe_ftrace(p)	(-ENODEV)
1059#endif
1060
1061/* Arm a kprobe with text_mutex */
1062static int arm_kprobe(struct kprobe *kp)
1063{
1064	if (unlikely(kprobe_ftrace(kp)))
1065		return arm_kprobe_ftrace(kp);
1066
1067	cpus_read_lock();
1068	mutex_lock(&text_mutex);
1069	__arm_kprobe(kp);
1070	mutex_unlock(&text_mutex);
1071	cpus_read_unlock();
1072
1073	return 0;
1074}
1075
1076/* Disarm a kprobe with text_mutex */
1077static int disarm_kprobe(struct kprobe *kp, bool reopt)
1078{
1079	if (unlikely(kprobe_ftrace(kp)))
1080		return disarm_kprobe_ftrace(kp);
1081
1082	cpus_read_lock();
1083	mutex_lock(&text_mutex);
1084	__disarm_kprobe(kp, reopt);
1085	mutex_unlock(&text_mutex);
1086	cpus_read_unlock();
1087
1088	return 0;
1089}
1090
1091/*
1092 * Aggregate handlers for multiple kprobes support - these handlers
1093 * take care of invoking the individual kprobe handlers on p->list
1094 */
1095static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1096{
1097	struct kprobe *kp;
1098
1099	list_for_each_entry_rcu(kp, &p->list, list) {
1100		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1101			set_kprobe_instance(kp);
1102			if (kp->pre_handler(kp, regs))
1103				return 1;
1104		}
1105		reset_kprobe_instance();
1106	}
1107	return 0;
1108}
1109NOKPROBE_SYMBOL(aggr_pre_handler);
1110
1111static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1112			      unsigned long flags)
1113{
1114	struct kprobe *kp;
1115
1116	list_for_each_entry_rcu(kp, &p->list, list) {
1117		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1118			set_kprobe_instance(kp);
1119			kp->post_handler(kp, regs, flags);
1120			reset_kprobe_instance();
1121		}
1122	}
1123}
1124NOKPROBE_SYMBOL(aggr_post_handler);
1125
1126static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1127			      int trapnr)
1128{
1129	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1130
1131	/*
1132	 * if we faulted "during" the execution of a user specified
1133	 * probe handler, invoke just that probe's fault handler
1134	 */
1135	if (cur && cur->fault_handler) {
1136		if (cur->fault_handler(cur, regs, trapnr))
1137			return 1;
1138	}
1139	return 0;
1140}
1141NOKPROBE_SYMBOL(aggr_fault_handler);
 1142
1143/* Walks the list and increments nmissed count for multiprobe case */
1144void kprobes_inc_nmissed_count(struct kprobe *p)
1145{
1146	struct kprobe *kp;
1147	if (!kprobe_aggrprobe(p)) {
1148		p->nmissed++;
1149	} else {
1150		list_for_each_entry_rcu(kp, &p->list, list)
1151			kp->nmissed++;
1152	}
1153	return;
1154}
1155NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1156
1157void recycle_rp_inst(struct kretprobe_instance *ri,
1158		     struct hlist_head *head)
1159{
1160	struct kretprobe *rp = ri->rp;
1161
 1162	/* remove rp inst off the kretprobe_inst_table */
1163	hlist_del(&ri->hlist);
1164	INIT_HLIST_NODE(&ri->hlist);
1165	if (likely(rp)) {
1166		raw_spin_lock(&rp->lock);
1167		hlist_add_head(&ri->hlist, &rp->free_instances);
1168		raw_spin_unlock(&rp->lock);
1169	} else
1170		/* Unregistering */
1171		hlist_add_head(&ri->hlist, head);
1172}
1173NOKPROBE_SYMBOL(recycle_rp_inst);
1174
1175void kretprobe_hash_lock(struct task_struct *tsk,
1176			 struct hlist_head **head, unsigned long *flags)
1177__acquires(hlist_lock)
1178{
1179	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1180	raw_spinlock_t *hlist_lock;
1181
1182	*head = &kretprobe_inst_table[hash];
1183	hlist_lock = kretprobe_table_lock_ptr(hash);
1184	raw_spin_lock_irqsave(hlist_lock, *flags);
1185}
1186NOKPROBE_SYMBOL(kretprobe_hash_lock);
1187
1188static void kretprobe_table_lock(unsigned long hash,
1189				 unsigned long *flags)
1190__acquires(hlist_lock)
1191{
1192	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1193	raw_spin_lock_irqsave(hlist_lock, *flags);
1194}
1195NOKPROBE_SYMBOL(kretprobe_table_lock);
1196
1197void kretprobe_hash_unlock(struct task_struct *tsk,
1198			   unsigned long *flags)
1199__releases(hlist_lock)
1200{
1201	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1202	raw_spinlock_t *hlist_lock;
1203
1204	hlist_lock = kretprobe_table_lock_ptr(hash);
1205	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1206}
1207NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1208
1209static void kretprobe_table_unlock(unsigned long hash,
1210				   unsigned long *flags)
1211__releases(hlist_lock)
1212{
1213	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1214	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1215}
1216NOKPROBE_SYMBOL(kretprobe_table_unlock);
1217
1218/*
1219 * This function is called from finish_task_switch when task tk becomes dead,
1220 * so that we can recycle any function-return probe instances associated
1221 * with this task. These left over instances represent probed functions
1222 * that have been called but will never return.
1223 */
1224void kprobe_flush_task(struct task_struct *tk)
1225{
1226	struct kretprobe_instance *ri;
1227	struct hlist_head *head, empty_rp;
1228	struct hlist_node *tmp;
1229	unsigned long hash, flags = 0;
1230
1231	if (unlikely(!kprobes_initialized))
1232		/* Early boot.  kretprobe_table_locks not yet initialized. */
1233		return;
1234
1235	INIT_HLIST_HEAD(&empty_rp);
1236	hash = hash_ptr(tk, KPROBE_HASH_BITS);
1237	head = &kretprobe_inst_table[hash];
1238	kretprobe_table_lock(hash, &flags);
1239	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1240		if (ri->task == tk)
1241			recycle_rp_inst(ri, &empty_rp);
1242	}
1243	kretprobe_table_unlock(hash, &flags);
1244	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1245		hlist_del(&ri->hlist);
1246		kfree(ri);
1247	}
1248}
1249NOKPROBE_SYMBOL(kprobe_flush_task);
1250
1251static inline void free_rp_inst(struct kretprobe *rp)
1252{
1253	struct kretprobe_instance *ri;
1254	struct hlist_node *next;
1255
1256	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1257		hlist_del(&ri->hlist);
1258		kfree(ri);
1259	}
1260}
1261
1262static void cleanup_rp_inst(struct kretprobe *rp)
1263{
1264	unsigned long flags, hash;
1265	struct kretprobe_instance *ri;
1266	struct hlist_node *next;
1267	struct hlist_head *head;
1268
1269	/* No race here */
1270	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1271		kretprobe_table_lock(hash, &flags);
1272		head = &kretprobe_inst_table[hash];
1273		hlist_for_each_entry_safe(ri, next, head, hlist) {
1274			if (ri->rp == rp)
1275				ri->rp = NULL;
1276		}
1277		kretprobe_table_unlock(hash, &flags);
1278	}
1279	free_rp_inst(rp);
1280}
1281NOKPROBE_SYMBOL(cleanup_rp_inst);
1282
1283/* Add the new probe to ap->list */
1284static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1285{
1286	if (p->post_handler)
1287		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1288
1289	list_add_rcu(&p->list, &ap->list);
1290	if (p->post_handler && !ap->post_handler)
1291		ap->post_handler = aggr_post_handler;
1292
1293	return 0;
1294}
1295
1296/*
1297 * Fill in the required fields of the "manager kprobe". Replace the
1298 * earlier kprobe in the hlist with the manager kprobe
1299 */
1300static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1301{
1302	/* Copy p's insn slot to ap */
1303	copy_kprobe(p, ap);
1304	flush_insn_slot(ap);
1305	ap->addr = p->addr;
1306	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1307	ap->pre_handler = aggr_pre_handler;
1308	ap->fault_handler = aggr_fault_handler;
1309	/* We don't care about a kprobe which has gone. */
1310	if (p->post_handler && !kprobe_gone(p))
1311		ap->post_handler = aggr_post_handler;
1312
1313	INIT_LIST_HEAD(&ap->list);
1314	INIT_HLIST_NODE(&ap->hlist);
1315
1316	list_add_rcu(&p->list, &ap->list);
1317	hlist_replace_rcu(&p->hlist, &ap->hlist);
1318}
1319
1320/*
1321 * This is the second or subsequent kprobe at the address - handle
1322 * the intricacies
1323 */
1324static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1325{
1326	int ret = 0;
1327	struct kprobe *ap = orig_p;
1328
1329	cpus_read_lock();
1330
1331	/* For preparing optimization, jump_label_text_reserved() is called */
1332	jump_label_lock();
1333	mutex_lock(&text_mutex);
1334
1335	if (!kprobe_aggrprobe(orig_p)) {
1336		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1337		ap = alloc_aggr_kprobe(orig_p);
1338		if (!ap) {
1339			ret = -ENOMEM;
1340			goto out;
1341		}
1342		init_aggr_kprobe(ap, orig_p);
1343	} else if (kprobe_unused(ap)) {
1344		/* This probe is going to die. Rescue it */
1345		ret = reuse_unused_kprobe(ap);
1346		if (ret)
1347			goto out;
1348	}
1349
1350	if (kprobe_gone(ap)) {
1351		/*
1352		 * We are attempting to insert a new probe at the same location
1353		 * as an earlier probe in a module vaddr area that has already
1354		 * been freed, so its instruction slot has already been
1355		 * released. We need a new slot for the new probe.
1356		 */
1357		ret = arch_prepare_kprobe(ap);
1358		if (ret)
1359			/*
1360			 * Even if we fail to allocate a new slot, we don't need
1361			 * to free the aggr_kprobe. It will be used next time, or
1362			 * freed by unregister_kprobe().
1363			 */
1364			goto out;
1365
1366		/* Prepare optimized instructions if possible. */
1367		prepare_optimized_kprobe(ap);
1368
1369		/*
1370		 * Clear gone flag to prevent allocating new slot again, and
1371		 * set disabled flag because it is not armed yet.
1372		 */
1373		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1374			    | KPROBE_FLAG_DISABLED;
1375	}
1376
1377	/* Copy ap's insn slot to p */
1378	copy_kprobe(ap, p);
1379	ret = add_new_kprobe(ap, p);
1380
1381out:
1382	mutex_unlock(&text_mutex);
1383	jump_label_unlock();
1384	cpus_read_unlock();
1385
1386	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1387		ap->flags &= ~KPROBE_FLAG_DISABLED;
1388		if (!kprobes_all_disarmed) {
1389			/* Arm the breakpoint again. */
1390			ret = arm_kprobe(ap);
1391			if (ret) {
1392				ap->flags |= KPROBE_FLAG_DISABLED;
1393				list_del_rcu(&p->list);
1394				synchronize_rcu();
1395			}
1396		}
1397	}
1398	return ret;
1399}
1400
1401bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1402{
1403	/* The __kprobes marked functions and entry code must not be probed */
1404	return addr >= (unsigned long)__kprobes_text_start &&
1405	       addr < (unsigned long)__kprobes_text_end;
1406}
1407
1408static bool __within_kprobe_blacklist(unsigned long addr)
1409{
1410	struct kprobe_blacklist_entry *ent;
1411
1412	if (arch_within_kprobe_blacklist(addr))
1413		return true;
1414	/*
1415	 * If there exists a kprobe_blacklist, verify and
1416	 * fail any probe registration in the prohibited area
1417	 */
1418	list_for_each_entry(ent, &kprobe_blacklist, list) {
1419		if (addr >= ent->start_addr && addr < ent->end_addr)
1420			return true;
1421	}
1422	return false;
1423}
1424
1425bool within_kprobe_blacklist(unsigned long addr)
1426{
1427	char symname[KSYM_NAME_LEN], *p;
1428
1429	if (__within_kprobe_blacklist(addr))
1430		return true;
1431
1432	/* Check if the address is on a suffixed symbol */
1433	if (!lookup_symbol_name(addr, symname)) {
1434		p = strchr(symname, '.');
1435		if (!p)
1436			return false;
1437		*p = '\0';
1438		addr = (unsigned long)kprobe_lookup_name(symname, 0);
1439		if (addr)
1440			return __within_kprobe_blacklist(addr);
1441	}
1442	return false;
1443}
1444
1445/*
1446 * If we have a symbol_name argument, look it up and add the offset field
1447 * to it. This way, we can specify a relative address to a symbol.
1448 * This returns an encoded error if it fails to look up the symbol or is
1449 * given an invalid combination of parameters.
1450 */
1451static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1452			const char *symbol_name, unsigned int offset)
1453{
1454	if ((symbol_name && addr) || (!symbol_name && !addr))
1455		goto invalid;
1456
1457	if (symbol_name) {
1458		addr = kprobe_lookup_name(symbol_name, offset);
1459		if (!addr)
1460			return ERR_PTR(-ENOENT);
1461	}
1462
1463	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1464	if (addr)
1465		return addr;
1466
1467invalid:
1468	return ERR_PTR(-EINVAL);
1469}
1470
1471static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1472{
1473	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1474}
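
/*
 * Example (minimal sketch): a probe point is specified either by a raw
 * ->addr or by a ->symbol_name plus ->offset, never both.  The symbol
 * "vfs_read" below is only an assumed illustration.
 */
#if 0
static struct kprobe example_addr_kp = {
	.symbol_name	= "vfs_read",	/* assumed example symbol */
	.offset		= 0,	/* bytes past the symbol; must be an insn boundary */
};
#endif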
1475
1476/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1477static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1478{
1479	struct kprobe *ap, *list_p;
1480
1481	ap = get_kprobe(p->addr);
1482	if (unlikely(!ap))
1483		return NULL;
1484
1485	if (p != ap) {
1486		list_for_each_entry_rcu(list_p, &ap->list, list)
1487			if (list_p == p)
1488			/* kprobe p is a valid probe */
1489				goto valid;
1490		return NULL;
1491	}
1492valid:
1493	return ap;
1494}
1495
1496/* Return error if the kprobe is being re-registered */
1497static inline int check_kprobe_rereg(struct kprobe *p)
1498{
1499	int ret = 0;
1500
1501	mutex_lock(&kprobe_mutex);
1502	if (__get_valid_kprobe(p))
1503		ret = -EINVAL;
1504	mutex_unlock(&kprobe_mutex);
1505
1506	return ret;
1507}
1508
1509int __weak arch_check_ftrace_location(struct kprobe *p)
1510{
1511	unsigned long ftrace_addr;
1512
1513	ftrace_addr = ftrace_location((unsigned long)p->addr);
1514	if (ftrace_addr) {
1515#ifdef CONFIG_KPROBES_ON_FTRACE
1516		/* The given address is not on an instruction boundary */
1517		if ((unsigned long)p->addr != ftrace_addr)
1518			return -EILSEQ;
1519		p->flags |= KPROBE_FLAG_FTRACE;
1520#else	/* !CONFIG_KPROBES_ON_FTRACE */
1521		return -EINVAL;
1522#endif
1523	}
1524	return 0;
1525}
1526
1527static int check_kprobe_address_safe(struct kprobe *p,
1528				     struct module **probed_mod)
1529{
1530	int ret;
1531
1532	ret = arch_check_ftrace_location(p);
1533	if (ret)
1534		return ret;
1535	jump_label_lock();
1536	preempt_disable();
1537
1538	/* Ensure the address is neither in a reserved area nor outside kernel text */
1539	if (!kernel_text_address((unsigned long) p->addr) ||
1540	    within_kprobe_blacklist((unsigned long) p->addr) ||
1541	    jump_label_text_reserved(p->addr, p->addr) ||
1542	    find_bug((unsigned long)p->addr)) {
1543		ret = -EINVAL;
1544		goto out;
1545	}
1546
1547	/* Check if we are probing a module */
1548	*probed_mod = __module_text_address((unsigned long) p->addr);
1549	if (*probed_mod) {
1550		/*
1551		 * We must hold a refcount of the probed module while updating
1552		 * its code to prohibit unexpected unloading.
1553		 */
1554		if (unlikely(!try_module_get(*probed_mod))) {
1555			ret = -ENOENT;
1556			goto out;
1557		}
1558
1559		/*
1560		 * If the module has already freed its .init.text, we can't
1561		 * insert kprobes there.
1562		 */
1563		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1564		    (*probed_mod)->state != MODULE_STATE_COMING) {
1565			module_put(*probed_mod);
1566			*probed_mod = NULL;
1567			ret = -ENOENT;
1568		}
1569	}
1570out:
1571	preempt_enable();
1572	jump_label_unlock();
1573
1574	return ret;
1575}
1576
1577int register_kprobe(struct kprobe *p)
1578{
1579	int ret;
1580	struct kprobe *old_p;
1581	struct module *probed_mod;
1582	kprobe_opcode_t *addr;
1583
1584	/* Adjust probe address from symbol */
1585	addr = kprobe_addr(p);
1586	if (IS_ERR(addr))
1587		return PTR_ERR(addr);
1588	p->addr = addr;
1589
1590	ret = check_kprobe_rereg(p);
1591	if (ret)
1592		return ret;
1593
1594	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1595	p->flags &= KPROBE_FLAG_DISABLED;
1596	p->nmissed = 0;
1597	INIT_LIST_HEAD(&p->list);
1598
1599	ret = check_kprobe_address_safe(p, &probed_mod);
1600	if (ret)
1601		return ret;
1602
1603	mutex_lock(&kprobe_mutex);
1604
1605	old_p = get_kprobe(p->addr);
1606	if (old_p) {
1607		/* This may unoptimize old_p, so register_aggr_kprobe() takes text_mutex. */
1608		ret = register_aggr_kprobe(old_p, p);
1609		goto out;
1610	}
1611
1612	cpus_read_lock();
1613	/* Prevent text modification */
1614	mutex_lock(&text_mutex);
1615	ret = prepare_kprobe(p);
1616	mutex_unlock(&text_mutex);
1617	cpus_read_unlock();
1618	if (ret)
1619		goto out;
1620
1621	INIT_HLIST_NODE(&p->hlist);
1622	hlist_add_head_rcu(&p->hlist,
1623		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1624
1625	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1626		ret = arm_kprobe(p);
1627		if (ret) {
1628			hlist_del_rcu(&p->hlist);
1629			synchronize_rcu();
1630			goto out;
1631		}
1632	}
1633
1634	/* Try to optimize kprobe */
1635	try_to_optimize_kprobe(p);
1636out:
1637	mutex_unlock(&kprobe_mutex);
1638
1639	if (probed_mod)
1640		module_put(probed_mod);
1641
1642	return ret;
1643}
1644EXPORT_SYMBOL_GPL(register_kprobe);
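
/*
 * Example (minimal sketch of a client module, along the lines of
 * samples/kprobes): register a kprobe with pre/post handlers.  The probed
 * symbol "do_sys_open" is an assumed example; a real module also needs
 * <linux/module.h>.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre-handler: probe hit at %pS\n", p->addr);
	return 0;	/* 0: go on to single-step the original instruction */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("post-handler: probe hit at %pS\n", p->addr);
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",	/* assumed example symbol */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);	/* arms unless KPROBE_FLAG_DISABLED */
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif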
1645
1646/* Check if all probes on the aggrprobe are disabled */
1647static int aggr_kprobe_disabled(struct kprobe *ap)
1648{
1649	struct kprobe *kp;
1650
1651	list_for_each_entry_rcu(kp, &ap->list, list)
1652		if (!kprobe_disabled(kp))
1653			/*
1654			 * There is an active probe on the list.
1655			 * We can't disable this ap.
1656			 */
1657			return 0;
1658
1659	return 1;
1660}
1661
1662/* Disable one kprobe: Must be called with kprobe_mutex held */
1663static struct kprobe *__disable_kprobe(struct kprobe *p)
1664{
1665	struct kprobe *orig_p;
1666	int ret;
1667
1668	/* Get an original kprobe for return */
1669	orig_p = __get_valid_kprobe(p);
1670	if (unlikely(orig_p == NULL))
1671		return ERR_PTR(-EINVAL);
1672
1673	if (!kprobe_disabled(p)) {
1674		/* Disable probe if it is a child probe */
1675		if (p != orig_p)
1676			p->flags |= KPROBE_FLAG_DISABLED;
1677
1678		/* Try to disarm and disable this/parent probe */
1679		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1680			/*
1681			 * If kprobes_all_disarmed is set, orig_p
1682			 * should have already been disarmed, so
1683			 * skip the unneeded disarming step.
1684			 */
1685			if (!kprobes_all_disarmed) {
1686				ret = disarm_kprobe(orig_p, true);
1687				if (ret) {
1688					p->flags &= ~KPROBE_FLAG_DISABLED;
1689					return ERR_PTR(ret);
1690				}
1691			}
1692			orig_p->flags |= KPROBE_FLAG_DISABLED;
1693		}
1694	}
1695
1696	return orig_p;
1697}
1698
1699/*
1700 * Unregister a kprobe without a scheduler synchronization.
1701 */
1702static int __unregister_kprobe_top(struct kprobe *p)
1703{
1704	struct kprobe *ap, *list_p;
1705
1706	/* Disable kprobe. This will disarm it if needed. */
1707	ap = __disable_kprobe(p);
1708	if (IS_ERR(ap))
1709		return PTR_ERR(ap);
1710
1711	if (ap == p)
1712		/*
1713		 * This probe is an independent (and non-optimized) kprobe
1714		 * (not an aggrprobe). Remove from the hash list.
1715		 */
1716		goto disarmed;
1717
1718	/* The following process expects this probe to be an aggrprobe */
1719	WARN_ON(!kprobe_aggrprobe(ap));
1720
1721	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1722		/*
1723		 * !disarmed can happen if the probe is under delayed
1724		 * unoptimizing.
1725		 */
1726		goto disarmed;
1727	else {
1728		/* If the probe being disabled has special handlers, update the aggrprobe */
1729		if (p->post_handler && !kprobe_gone(p)) {
1730			list_for_each_entry_rcu(list_p, &ap->list, list) {
1731				if ((list_p != p) && (list_p->post_handler))
1732					goto noclean;
1733			}
1734			ap->post_handler = NULL;
1735		}
1736noclean:
1737		/*
1738		 * Remove from the aggrprobe: this path will do nothing in
1739		 * __unregister_kprobe_bottom().
1740		 */
1741		list_del_rcu(&p->list);
1742		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1743			/*
1744			 * Try to optimize this probe again, because post
1745			 * handler may have been changed.
1746			 */
1747			optimize_kprobe(ap);
1748	}
1749	return 0;
1750
1751disarmed:
1752	hlist_del_rcu(&ap->hlist);
1753	return 0;
1754}
1755
1756static void __unregister_kprobe_bottom(struct kprobe *p)
1757{
1758	struct kprobe *ap;
1759
1760	if (list_empty(&p->list))
1761		/* This is an independent kprobe */
1762		arch_remove_kprobe(p);
1763	else if (list_is_singular(&p->list)) {
1764		/* This is the last child of an aggrprobe */
1765		ap = list_entry(p->list.next, struct kprobe, list);
1766		list_del(&p->list);
1767		free_aggr_kprobe(ap);
1768	}
1769	/* Otherwise, do nothing. */
1770}
1771
1772int register_kprobes(struct kprobe **kps, int num)
1773{
1774	int i, ret = 0;
1775
1776	if (num <= 0)
1777		return -EINVAL;
1778	for (i = 0; i < num; i++) {
1779		ret = register_kprobe(kps[i]);
1780		if (ret < 0) {
1781			if (i > 0)
1782				unregister_kprobes(kps, i);
1783			break;
1784		}
1785	}
1786	return ret;
1787}
1788EXPORT_SYMBOL_GPL(register_kprobes);
1789
1790void unregister_kprobe(struct kprobe *p)
1791{
1792	unregister_kprobes(&p, 1);
1793}
1794EXPORT_SYMBOL_GPL(unregister_kprobe);
1795
1796void unregister_kprobes(struct kprobe **kps, int num)
1797{
1798	int i;
1799
1800	if (num <= 0)
1801		return;
1802	mutex_lock(&kprobe_mutex);
1803	for (i = 0; i < num; i++)
1804		if (__unregister_kprobe_top(kps[i]) < 0)
1805			kps[i]->addr = NULL;
1806	mutex_unlock(&kprobe_mutex);
1807
1808	synchronize_rcu();
1809	for (i = 0; i < num; i++)
1810		if (kps[i]->addr)
1811			__unregister_kprobe_bottom(kps[i]);
1812}
1813EXPORT_SYMBOL_GPL(unregister_kprobes);
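
/*
 * Example (sketch): batch registration rolls back already-registered probes
 * on failure, so a caller may treat the whole array as all-or-nothing.
 * "example_kp_a" and "example_kp_b" are assumed, already-initialized kprobes.
 */
#if 0
static struct kprobe *example_kps[] = { &example_kp_a, &example_kp_b };

static int __init example_batch_init(void)
{
	return register_kprobes(example_kps, ARRAY_SIZE(example_kps));
}

static void __exit example_batch_exit(void)
{
	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
}
#endif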
1814
1815int __weak kprobe_exceptions_notify(struct notifier_block *self,
1816					unsigned long val, void *data)
1817{
1818	return NOTIFY_DONE;
1819}
1820NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1821
1822static struct notifier_block kprobe_exceptions_nb = {
1823	.notifier_call = kprobe_exceptions_notify,
1824	.priority = 0x7fffffff /* we need to be notified first */
1825};
1826
1827unsigned long __weak arch_deref_entry_point(void *entry)
1828{
1829	return (unsigned long)entry;
1830}
1831
1832#ifdef CONFIG_KRETPROBES
1833/*
1834 * This kprobe pre_handler is registered with every kretprobe. When the
1835 * probe hits, it will set up the return probe.
1836 */
1837static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1838{
1839	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1840	unsigned long hash, flags = 0;
1841	struct kretprobe_instance *ri;
1842
1843	/*
1844	 * To avoid deadlocks, prohibit return probing in NMI contexts:
1845	 * just skip the probe and increase the (inexact) 'nmissed'
1846	 * statistical counter, so that the user is informed that
1847	 * something happened:
1848	 */
1849	if (unlikely(in_nmi())) {
1850		rp->nmissed++;
1851		return 0;
1852	}
1853
1854	/* TODO: consider to only swap the RA after the last pre_handler fired */
1855	hash = hash_ptr(current, KPROBE_HASH_BITS);
1856	raw_spin_lock_irqsave(&rp->lock, flags);
1857	if (!hlist_empty(&rp->free_instances)) {
1858		ri = hlist_entry(rp->free_instances.first,
1859				struct kretprobe_instance, hlist);
1860		hlist_del(&ri->hlist);
1861		raw_spin_unlock_irqrestore(&rp->lock, flags);
1862
1863		ri->rp = rp;
1864		ri->task = current;
1865
1866		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1867			raw_spin_lock_irqsave(&rp->lock, flags);
1868			hlist_add_head(&ri->hlist, &rp->free_instances);
1869			raw_spin_unlock_irqrestore(&rp->lock, flags);
1870			return 0;
1871		}
1872
1873		arch_prepare_kretprobe(ri, regs);
1874
1875		/* XXX(hch): why is there no hlist_move_head? */
1876		INIT_HLIST_NODE(&ri->hlist);
1877		kretprobe_table_lock(hash, &flags);
1878		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1879		kretprobe_table_unlock(hash, &flags);
1880	} else {
1881		rp->nmissed++;
1882		raw_spin_unlock_irqrestore(&rp->lock, flags);
1883	}
1884	return 0;
1885}
1886NOKPROBE_SYMBOL(pre_handler_kretprobe);
1887
1888bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1889{
1890	return !offset;
1891}
1892
1893bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1894{
1895	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1896
1897	if (IS_ERR(kp_addr))
1898		return false;
1899
1900	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1901						!arch_kprobe_on_func_entry(offset))
1902		return false;
1903
1904	return true;
1905}
1906
1907int register_kretprobe(struct kretprobe *rp)
1908{
1909	int ret = 0;
1910	struct kretprobe_instance *inst;
1911	int i;
1912	void *addr;
1913
1914	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1915		return -EINVAL;
1916
1917	if (kretprobe_blacklist_size) {
1918		addr = kprobe_addr(&rp->kp);
1919		if (IS_ERR(addr))
1920			return PTR_ERR(addr);
1921
1922		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1923			if (kretprobe_blacklist[i].addr == addr)
1924				return -EINVAL;
1925		}
1926	}
1927
1928	rp->kp.pre_handler = pre_handler_kretprobe;
1929	rp->kp.post_handler = NULL;
1930	rp->kp.fault_handler = NULL;
1931
1932	/* Pre-allocate memory for max kretprobe instances */
1933	if (rp->maxactive <= 0) {
1934#ifdef CONFIG_PREEMPTION
1935		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1936#else
1937		rp->maxactive = num_possible_cpus();
1938#endif
1939	}
1940	raw_spin_lock_init(&rp->lock);
1941	INIT_HLIST_HEAD(&rp->free_instances);
1942	for (i = 0; i < rp->maxactive; i++) {
1943		inst = kmalloc(sizeof(struct kretprobe_instance) +
1944			       rp->data_size, GFP_KERNEL);
1945		if (inst == NULL) {
1946			free_rp_inst(rp);
1947			return -ENOMEM;
1948		}
1949		INIT_HLIST_NODE(&inst->hlist);
1950		hlist_add_head(&inst->hlist, &rp->free_instances);
1951	}
1952
1953	rp->nmissed = 0;
1954	/* Establish function entry probe point */
1955	ret = register_kprobe(&rp->kp);
1956	if (ret != 0)
1957		free_rp_inst(rp);
1958	return ret;
1959}
1960EXPORT_SYMBOL_GPL(register_kretprobe);
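
/*
 * Example (minimal sketch, modeled on samples/kprobes/kretprobe_example.c,
 * with "_do_fork" as an assumed probed symbol): pair the entry_handler with
 * the return handler and keep per-instance state in ri->data, sized by
 * ->data_size.  A real module also needs <linux/module.h> and <linux/ktime.h>.
 */
#if 0
struct example_data {
	ktime_t entry_stamp;
};

static int example_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	d->entry_stamp = ktime_get();
	return 0;	/* non-zero would skip this hit (no return probe queued) */
}

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	pr_info("%s returned after %lld ns\n", ri->rp->kp.symbol_name,
		ktime_to_ns(ktime_sub(ktime_get(), d->entry_stamp)));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "_do_fork",	/* assumed example symbol */
	.entry_handler	= example_entry,
	.handler	= example_ret,
	.data_size	= sizeof(struct example_data),
	.maxactive	= 20,	/* overrides the default sizing above */
};
#endif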
1961
1962int register_kretprobes(struct kretprobe **rps, int num)
1963{
1964	int ret = 0, i;
1965
1966	if (num <= 0)
1967		return -EINVAL;
1968	for (i = 0; i < num; i++) {
1969		ret = register_kretprobe(rps[i]);
1970		if (ret < 0) {
1971			if (i > 0)
1972				unregister_kretprobes(rps, i);
1973			break;
1974		}
1975	}
1976	return ret;
1977}
1978EXPORT_SYMBOL_GPL(register_kretprobes);
1979
1980void unregister_kretprobe(struct kretprobe *rp)
1981{
1982	unregister_kretprobes(&rp, 1);
1983}
1984EXPORT_SYMBOL_GPL(unregister_kretprobe);
1985
1986void unregister_kretprobes(struct kretprobe **rps, int num)
1987{
1988	int i;
1989
1990	if (num <= 0)
1991		return;
1992	mutex_lock(&kprobe_mutex);
1993	for (i = 0; i < num; i++)
1994		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1995			rps[i]->kp.addr = NULL;
1996	mutex_unlock(&kprobe_mutex);
1997
1998	synchronize_rcu();
1999	for (i = 0; i < num; i++) {
2000		if (rps[i]->kp.addr) {
2001			__unregister_kprobe_bottom(&rps[i]->kp);
2002			cleanup_rp_inst(rps[i]);
2003		}
2004	}
2005}
2006EXPORT_SYMBOL_GPL(unregister_kretprobes);
2007
2008#else /* CONFIG_KRETPROBES */
2009int register_kretprobe(struct kretprobe *rp)
2010{
2011	return -ENOSYS;
2012}
2013EXPORT_SYMBOL_GPL(register_kretprobe);
2014
2015int register_kretprobes(struct kretprobe **rps, int num)
2016{
2017	return -ENOSYS;
2018}
2019EXPORT_SYMBOL_GPL(register_kretprobes);
2020
2021void unregister_kretprobe(struct kretprobe *rp)
2022{
2023}
2024EXPORT_SYMBOL_GPL(unregister_kretprobe);
2025
2026void unregister_kretprobes(struct kretprobe **rps, int num)
2027{
2028}
2029EXPORT_SYMBOL_GPL(unregister_kretprobes);
2030
2031static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2032{
2033	return 0;
2034}
2035NOKPROBE_SYMBOL(pre_handler_kretprobe);
2036
2037#endif /* CONFIG_KRETPROBES */
2038
2039/* Set the kprobe gone and remove its instruction buffer. */
2040static void kill_kprobe(struct kprobe *p)
2041{
2042	struct kprobe *kp;
2043
2044	p->flags |= KPROBE_FLAG_GONE;
2045	if (kprobe_aggrprobe(p)) {
2046		/*
2047		 * If this is an aggr_kprobe, we have to list all the
2048		 * chained probes and mark them GONE.
2049		 */
2050		list_for_each_entry_rcu(kp, &p->list, list)
2051			kp->flags |= KPROBE_FLAG_GONE;
2052		p->post_handler = NULL;
2053		kill_optimized_kprobe(p);
2054	}
2055	/*
2056	 * Here, we can remove insn_slot safely, because no thread calls
2057	 * the original probed function (which will be freed soon) any more.
2058	 */
2059	arch_remove_kprobe(p);
2060}
2061
2062/* Disable one kprobe */
2063int disable_kprobe(struct kprobe *kp)
2064{
2065	int ret = 0;
2066	struct kprobe *p;
2067
2068	mutex_lock(&kprobe_mutex);
2069
2070	/* Disable this kprobe */
2071	p = __disable_kprobe(kp);
2072	if (IS_ERR(p))
2073		ret = PTR_ERR(p);
2074
2075	mutex_unlock(&kprobe_mutex);
2076	return ret;
2077}
2078EXPORT_SYMBOL_GPL(disable_kprobe);
2079
2080/* Enable one kprobe */
2081int enable_kprobe(struct kprobe *kp)
2082{
2083	int ret = 0;
2084	struct kprobe *p;
2085
2086	mutex_lock(&kprobe_mutex);
2087
2088	/* Check whether specified probe is valid. */
2089	p = __get_valid_kprobe(kp);
2090	if (unlikely(p == NULL)) {
2091		ret = -EINVAL;
2092		goto out;
2093	}
2094
2095	if (kprobe_gone(kp)) {
2096		/* This kprobe has gone; we can't enable it. */
2097		ret = -EINVAL;
2098		goto out;
2099	}
2100
2101	if (p != kp)
2102		kp->flags &= ~KPROBE_FLAG_DISABLED;
2103
2104	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2105		p->flags &= ~KPROBE_FLAG_DISABLED;
2106		ret = arm_kprobe(p);
2107		if (ret)
2108			p->flags |= KPROBE_FLAG_DISABLED;
2109	}
2110out:
2111	mutex_unlock(&kprobe_mutex);
2112	return ret;
2113}
2114EXPORT_SYMBOL_GPL(enable_kprobe);
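
/*
 * Example (sketch): a registered probe can be toggled at run time without
 * unregistering it; "example_kp" is an assumed, already-registered kprobe.
 */
#if 0
	disable_kprobe(&example_kp);	/* handlers stop firing, probe stays registered */
	/* ... quiet section ... */
	enable_kprobe(&example_kp);	/* re-arms, unless the probe has gone away */
#endif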
2115
2116/* Callers must NOT call this on the usual path. This is only for critical cases. */
2117void dump_kprobe(struct kprobe *kp)
2118{
2119	pr_err("Dumping kprobe:\n");
2120	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2121	       kp->symbol_name, kp->offset, kp->addr);
2122}
2123NOKPROBE_SYMBOL(dump_kprobe);
2124
2125int kprobe_add_ksym_blacklist(unsigned long entry)
2126{
2127	struct kprobe_blacklist_entry *ent;
2128	unsigned long offset = 0, size = 0;
2129
2130	if (!kernel_text_address(entry) ||
2131	    !kallsyms_lookup_size_offset(entry, &size, &offset))
2132		return -EINVAL;
2133
2134	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2135	if (!ent)
2136		return -ENOMEM;
2137	ent->start_addr = entry;
2138	ent->end_addr = entry + size;
2139	INIT_LIST_HEAD(&ent->list);
2140	list_add_tail(&ent->list, &kprobe_blacklist);
2141
2142	return (int)size;
2143}
2144
2145/* Add all symbols in given area into kprobe blacklist */
2146int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2147{
2148	unsigned long entry;
2149	int ret = 0;
2150
2151	for (entry = start; entry < end; entry += ret) {
2152		ret = kprobe_add_ksym_blacklist(entry);
2153		if (ret < 0)
2154			return ret;
2155		if (ret == 0)	/* In case of alias symbol */
2156			ret = 1;
2157	}
2158	return 0;
2159}
2160
2161int __init __weak arch_populate_kprobe_blacklist(void)
2162{
2163	return 0;
2164}
2165
2166/*
2167 * Lookup and populate the kprobe_blacklist.
2168 *
2169 * Unlike the kretprobe blacklist, we'll need to determine
2170 * the range of addresses that belong to the said functions,
2171 * since a kprobe need not necessarily be at the beginning
2172 * of a function.
2173 */
2174static int __init populate_kprobe_blacklist(unsigned long *start,
2175					     unsigned long *end)
2176{
2177	unsigned long entry;
2178	unsigned long *iter;
2179	int ret;
2180
2181	for (iter = start; iter < end; iter++) {
2182		entry = arch_deref_entry_point((void *)*iter);
2183		ret = kprobe_add_ksym_blacklist(entry);
2184		if (ret == -EINVAL)
2185			continue;
2186		if (ret < 0)
2187			return ret;
2188	}
2189
2190	/* Symbols in __kprobes_text are blacklisted */
2191	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2192					(unsigned long)__kprobes_text_end);
2193
2194	return ret ? : arch_populate_kprobe_blacklist();
2195}
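
/*
 * Example (sketch): entries in the _kprobe_blacklist section come from
 * NOKPROBE_SYMBOL(), as used throughout this file.  Marking a function this
 * way makes a later register_kprobe() on it fail with -EINVAL.
 */
#if 0
static void example_never_probe_me(void)
{
	/* code that must never be probed, e.g. used by the kprobe path itself */
}
NOKPROBE_SYMBOL(example_never_probe_me);
#endif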
2196
2197/* Module notifier call back, checking kprobes on the module */
2198static int kprobes_module_callback(struct notifier_block *nb,
2199				   unsigned long val, void *data)
2200{
2201	struct module *mod = data;
2202	struct hlist_head *head;
2203	struct kprobe *p;
2204	unsigned int i;
2205	int checkcore = (val == MODULE_STATE_GOING);
2206
2207	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2208		return NOTIFY_DONE;
2209
2210	/*
2211	 * When MODULE_STATE_GOING is notified, both the module's .text and
2212	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2213	 * notified, only the .init.text section will be freed. We need to
2214	 * disable the kprobes which have been inserted in those sections.
2215	 */
2216	mutex_lock(&kprobe_mutex);
2217	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2218		head = &kprobe_table[i];
2219		hlist_for_each_entry_rcu(p, head, hlist)
2220			if (within_module_init((unsigned long)p->addr, mod) ||
2221			    (checkcore &&
2222			     within_module_core((unsigned long)p->addr, mod))) {
2223				/*
2224				 * The vaddr this probe is installed at will soon
2225				 * be vfreed but not synced to disk. Hence,
2226				 * disarming the breakpoint isn't needed.
2227				 *
2228				 * Note, this will also move any optimized probes
2229				 * that are pending removal off their
2230				 * corresponding lists onto the freeing_list, so
2231				 * they will not be touched by the delayed
2232				 * kprobe_optimizer work handler.
2233				 */
2234				kill_kprobe(p);
2235			}
2236	}
2237	mutex_unlock(&kprobe_mutex);
2238	return NOTIFY_DONE;
2239}
2240
2241static struct notifier_block kprobe_module_nb = {
2242	.notifier_call = kprobes_module_callback,
2243	.priority = 0
2244};
2245
2246/* Markers of _kprobe_blacklist section */
2247extern unsigned long __start_kprobe_blacklist[];
2248extern unsigned long __stop_kprobe_blacklist[];
2249
2250static int __init init_kprobes(void)
2251{
2252	int i, err = 0;
2253
2254	/* FIXME allocate the probe table, currently defined statically */
2255	/* initialize all list heads */
2256	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2257		INIT_HLIST_HEAD(&kprobe_table[i]);
2258		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2259		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2260	}
2261
2262	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2263					__stop_kprobe_blacklist);
2264	if (err) {
2265		pr_err("kprobes: failed to populate blacklist: %d\n", err);
2266		pr_err("Please take care of using kprobes.\n");
2267	}
2268
2269	if (kretprobe_blacklist_size) {
2270		/* lookup the function address from its name */
2271		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2272			kretprobe_blacklist[i].addr =
2273				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2274			if (!kretprobe_blacklist[i].addr)
2275				printk("kretprobe: lookup failed: %s\n",
2276				       kretprobe_blacklist[i].name);
2277		}
2278	}
2279
2280#if defined(CONFIG_OPTPROBES)
2281#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2282	/* Init kprobe_optinsn_slots */
2283	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2284#endif
2285	/* By default, kprobes can be optimized */
2286	kprobes_allow_optimization = true;
2287#endif
2288
2289	/* By default, kprobes are armed */
2290	kprobes_all_disarmed = false;
2291
2292	err = arch_init_kprobes();
2293	if (!err)
2294		err = register_die_notifier(&kprobe_exceptions_nb);
2295	if (!err)
2296		err = register_module_notifier(&kprobe_module_nb);
2297
2298	kprobes_initialized = (err == 0);
2299
2300	if (!err)
2301		init_test_probes();
2302	return err;
2303}
2304subsys_initcall(init_kprobes);
2305
2306#ifdef CONFIG_DEBUG_FS
2307static void report_probe(struct seq_file *pi, struct kprobe *p,
2308		const char *sym, int offset, char *modname, struct kprobe *pp)
2309{
2310	char *kprobe_type;
2311	void *addr = p->addr;
2312
2313	if (p->pre_handler == pre_handler_kretprobe)
2314		kprobe_type = "r";
2315	else
2316		kprobe_type = "k";
2317
2318	if (!kallsyms_show_value())
2319		addr = NULL;
2320
2321	if (sym)
2322		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2323			addr, kprobe_type, sym, offset,
2324			(modname ? modname : " "));
2325	else	/* try to use %pS */
2326		seq_printf(pi, "%px  %s  %pS ",
2327			addr, kprobe_type, p->addr);
2328
2329	if (!pp)
2330		pp = p;
2331	seq_printf(pi, "%s%s%s%s\n",
2332		(kprobe_gone(p) ? "[GONE]" : ""),
2333		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2334		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2335		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2336}
2337
2338static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2339{
2340	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2341}
2342
2343static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2344{
2345	(*pos)++;
2346	if (*pos >= KPROBE_TABLE_SIZE)
2347		return NULL;
2348	return pos;
2349}
2350
2351static void kprobe_seq_stop(struct seq_file *f, void *v)
2352{
2353	/* Nothing to do */
2354}
2355
2356static int show_kprobe_addr(struct seq_file *pi, void *v)
2357{
2358	struct hlist_head *head;
2359	struct kprobe *p, *kp;
2360	const char *sym = NULL;
2361	unsigned int i = *(loff_t *) v;
2362	unsigned long offset = 0;
2363	char *modname, namebuf[KSYM_NAME_LEN];
2364
2365	head = &kprobe_table[i];
2366	preempt_disable();
2367	hlist_for_each_entry_rcu(p, head, hlist) {
2368		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2369					&offset, &modname, namebuf);
2370		if (kprobe_aggrprobe(p)) {
2371			list_for_each_entry_rcu(kp, &p->list, list)
2372				report_probe(pi, kp, sym, offset, modname, p);
2373		} else
2374			report_probe(pi, p, sym, offset, modname, NULL);
2375	}
2376	preempt_enable();
2377	return 0;
2378}
2379
2380static const struct seq_operations kprobes_seq_ops = {
2381	.start = kprobe_seq_start,
2382	.next  = kprobe_seq_next,
2383	.stop  = kprobe_seq_stop,
2384	.show  = show_kprobe_addr
2385};
2386
2387static int kprobes_open(struct inode *inode, struct file *filp)
2388{
2389	return seq_open(filp, &kprobes_seq_ops);
2390}
2391
2392static const struct file_operations debugfs_kprobes_operations = {
2393	.open           = kprobes_open,
2394	.read           = seq_read,
2395	.llseek         = seq_lseek,
2396	.release        = seq_release,
2397};
2398
2399/* kprobes/blacklist -- shows which functions cannot be probed */
2400static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2401{
2402	return seq_list_start(&kprobe_blacklist, *pos);
2403}
2404
2405static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2406{
2407	return seq_list_next(v, &kprobe_blacklist, pos);
2408}
2409
2410static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2411{
2412	struct kprobe_blacklist_entry *ent =
2413		list_entry(v, struct kprobe_blacklist_entry, list);
2414
2415	/*
2416	 * If /proc/kallsyms is not showing kernel addresses, we won't
2417	 * show them here either.
2418	 */
2419	if (!kallsyms_show_value())
2420		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2421			   (void *)ent->start_addr);
2422	else
2423		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2424			   (void *)ent->end_addr, (void *)ent->start_addr);
2425	return 0;
2426}
2427
2428static const struct seq_operations kprobe_blacklist_seq_ops = {
2429	.start = kprobe_blacklist_seq_start,
2430	.next  = kprobe_blacklist_seq_next,
2431	.stop  = kprobe_seq_stop,	/* Reuse void function */
2432	.show  = kprobe_blacklist_seq_show,
2433};
2434
2435static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2436{
2437	return seq_open(filp, &kprobe_blacklist_seq_ops);
2438}
2439
2440static const struct file_operations debugfs_kprobe_blacklist_ops = {
2441	.open           = kprobe_blacklist_open,
2442	.read           = seq_read,
2443	.llseek         = seq_lseek,
2444	.release        = seq_release,
2445};
2446
2447static int arm_all_kprobes(void)
2448{
2449	struct hlist_head *head;
2450	struct kprobe *p;
2451	unsigned int i, total = 0, errors = 0;
2452	int err, ret = 0;
2453
2454	mutex_lock(&kprobe_mutex);
2455
2456	/* If kprobes are armed, just return */
2457	if (!kprobes_all_disarmed)
2458		goto already_enabled;
2459
2460	/*
2461	 * optimize_kprobe() called by arm_kprobe() checks
2462	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2463	 * arm_kprobe.
2464	 */
2465	kprobes_all_disarmed = false;
2466	/* Arming kprobes doesn't optimize the kprobe itself */
2467	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2468		head = &kprobe_table[i];
2469		/* Arm all kprobes on a best-effort basis */
2470		hlist_for_each_entry_rcu(p, head, hlist) {
2471			if (!kprobe_disabled(p)) {
2472				err = arm_kprobe(p);
2473				if (err)  {
2474					errors++;
2475					ret = err;
2476				}
2477				total++;
2478			}
2479		}
2480	}
2481
2482	if (errors)
2483		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2484			errors, total);
2485	else
2486		pr_info("Kprobes globally enabled\n");
2487
2488already_enabled:
2489	mutex_unlock(&kprobe_mutex);
2490	return ret;
2491}
2492
2493static int disarm_all_kprobes(void)
2494{
2495	struct hlist_head *head;
2496	struct kprobe *p;
2497	unsigned int i, total = 0, errors = 0;
2498	int err, ret = 0;
2499
2500	mutex_lock(&kprobe_mutex);
2501
2502	/* If kprobes are already disarmed, just return */
2503	if (kprobes_all_disarmed) {
2504		mutex_unlock(&kprobe_mutex);
2505		return 0;
2506	}
2507
2508	kprobes_all_disarmed = true;
2509
2510	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2511		head = &kprobe_table[i];
2512		/* Disarm all kprobes on a best-effort basis */
2513		hlist_for_each_entry_rcu(p, head, hlist) {
2514			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2515				err = disarm_kprobe(p, false);
2516				if (err) {
2517					errors++;
2518					ret = err;
2519				}
2520				total++;
2521			}
2522		}
2523	}
2524
2525	if (errors)
2526		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2527			errors, total);
2528	else
2529		pr_info("Kprobes globally disabled\n");
2530
2531	mutex_unlock(&kprobe_mutex);
2532
2533	/* Wait for the optimizer to finish disarming all kprobes */
2534	wait_for_kprobe_optimizer();
2535
2536	return ret;
2537}
2538
2539/*
2540 * XXX: The debugfs bool file interface doesn't allow for callbacks
2541 * when the bool state is switched. We can reuse that facility when
2542 * available.
2543 */
2544static ssize_t read_enabled_file_bool(struct file *file,
2545	       char __user *user_buf, size_t count, loff_t *ppos)
2546{
2547	char buf[3];
2548
2549	if (!kprobes_all_disarmed)
2550		buf[0] = '1';
2551	else
2552		buf[0] = '0';
2553	buf[1] = '\n';
2554	buf[2] = 0x00;
2555	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2556}
2557
2558static ssize_t write_enabled_file_bool(struct file *file,
2559	       const char __user *user_buf, size_t count, loff_t *ppos)
2560{
2561	char buf[32];
2562	size_t buf_size;
2563	int ret = 0;
2564
2565	buf_size = min(count, (sizeof(buf)-1));
2566	if (copy_from_user(buf, user_buf, buf_size))
2567		return -EFAULT;
2568
2569	buf[buf_size] = '\0';
2570	switch (buf[0]) {
2571	case 'y':
2572	case 'Y':
2573	case '1':
2574		ret = arm_all_kprobes();
2575		break;
2576	case 'n':
2577	case 'N':
2578	case '0':
2579		ret = disarm_all_kprobes();
2580		break;
2581	default:
2582		return -EINVAL;
2583	}
2584
2585	if (ret)
2586		return ret;
2587
2588	return count;
2589}
2590
2591static const struct file_operations fops_kp = {
2592	.read =         read_enabled_file_bool,
2593	.write =        write_enabled_file_bool,
2594	.llseek =	default_llseek,
2595};
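
/*
 * Usage note (assuming debugfs is mounted at /sys/kernel/debug): writing
 * '1'/'y'/'Y' to kprobes/enabled calls arm_all_kprobes(), writing
 * '0'/'n'/'N' calls disarm_all_kprobes(), and reading it reports whether
 * probes are currently armed.
 */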
2596
2597static int __init debugfs_kprobe_init(void)
2598{
2599	struct dentry *dir;
2600	unsigned int value = 1;
2601
2602	dir = debugfs_create_dir("kprobes", NULL);
2603
2604	debugfs_create_file("list", 0400, dir, NULL,
2605			    &debugfs_kprobes_operations);
2606
2607	debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);
2608
2609	debugfs_create_file("blacklist", 0400, dir, NULL,
2610			    &debugfs_kprobe_blacklist_ops);
2611
2612	return 0;
2613}
2614
2615late_initcall(debugfs_kprobe_init);
2616#endif /* CONFIG_DEBUG_FS */