v4.10.11
   1/*
   2 * core.c - Kernel Live Patching Core
   3 *
   4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   5 * Copyright (C) 2014 SUSE
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License
   9 * as published by the Free Software Foundation; either version 2
  10 * of the License, or (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22
  23#include <linux/module.h>
  24#include <linux/kernel.h>
  25#include <linux/mutex.h>
  26#include <linux/slab.h>
  27#include <linux/ftrace.h>
  28#include <linux/list.h>
  29#include <linux/kallsyms.h>
  30#include <linux/livepatch.h>
  31#include <linux/elf.h>
  32#include <linux/moduleloader.h>
  33#include <asm/cacheflush.h>
  34
  35/**
  36 * struct klp_ops - structure for tracking registered ftrace ops structs
  37 *
  38 * A single ftrace_ops is shared between all enabled replacement functions
  39 * (klp_func structs) which have the same old_addr.  This allows the switch
  40 * between function versions to happen instantaneously by updating the klp_ops
  41 * struct's func_stack list.  The winner is the klp_func at the top of the
  42 * func_stack (front of the list).
  43 *
  44 * @node:	node for the global klp_ops list
  45 * @func_stack:	list head for the stack of klp_func's (active func is on top)
  46 * @fops:	registered ftrace ops struct
  47 */
  48struct klp_ops {
  49	struct list_head node;
  50	struct list_head func_stack;
  51	struct ftrace_ops fops;
  52};
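/*
 * Illustrative sketch (not part of the original source): if two enabled
 * patches both replace the same old_addr, they share a single klp_ops and
 * stack their klp_func entries:
 *
 *	ops->func_stack:  func_from_patch2 -> func_from_patch1
 *	                  ^ active (front of the list)
 *
 * Disabling patch2 is just a list_del_rcu() of its entry, after which
 * func_from_patch1 instantly becomes the active version again.
 */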
  53
  54/*
  55 * The klp_mutex protects the global lists and state transitions of any
  56 * structure reachable from them.  References to any structure must be obtained
  57 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
  58 * ensure it gets consistent data).
  59 */
  60static DEFINE_MUTEX(klp_mutex);
  61
  62static LIST_HEAD(klp_patches);
  63static LIST_HEAD(klp_ops);
  64
  65static struct kobject *klp_root_kobj;
  66
  67static struct klp_ops *klp_find_ops(unsigned long old_addr)
  68{
  69	struct klp_ops *ops;
  70	struct klp_func *func;
  71
  72	list_for_each_entry(ops, &klp_ops, node) {
  73		func = list_first_entry(&ops->func_stack, struct klp_func,
  74					stack_node);
  75		if (func->old_addr == old_addr)
  76			return ops;
  77	}
  78
  79	return NULL;
  80}
  81
  82static bool klp_is_module(struct klp_object *obj)
  83{
  84	return obj->name;
  85}
  86
  87static bool klp_is_object_loaded(struct klp_object *obj)
  88{
  89	return !obj->name || obj->mod;
  90}
  91
  92/* sets obj->mod if object is not vmlinux and module is found */
  93static void klp_find_object_module(struct klp_object *obj)
  94{
  95	struct module *mod;
  96
  97	if (!klp_is_module(obj))
  98		return;
  99
 100	mutex_lock(&module_mutex);
 101	/*
 102	 * We do not want to block removal of patched modules and therefore
 103	 * we do not take a reference here. The patches are removed by
 104	 * klp_module_going() instead.
 105	 */
 106	mod = find_module(obj->name);
 107	/*
  108	 * Do not interfere with the work of klp_module_coming() and klp_module_going().
 109	 * Note that the patch might still be needed before klp_module_going()
 110	 * is called. Module functions can be called even in the GOING state
 111	 * until mod->exit() finishes. This is especially important for
  112	 * patches that modify the semantics of the functions.
 113	 */
 114	if (mod && mod->klp_alive)
 115		obj->mod = mod;
 116
 117	mutex_unlock(&module_mutex);
 118}
 119
 120/* klp_mutex must be held by caller */
 121static bool klp_is_patch_registered(struct klp_patch *patch)
 122{
 123	struct klp_patch *mypatch;
 124
 125	list_for_each_entry(mypatch, &klp_patches, list)
 126		if (mypatch == patch)
 127			return true;
 128
 129	return false;
 130}
 131
 132static bool klp_initialized(void)
 133{
 134	return !!klp_root_kobj;
 135}
 136
 137struct klp_find_arg {
 138	const char *objname;
 139	const char *name;
 140	unsigned long addr;
 141	unsigned long count;
 142	unsigned long pos;
 143};
 144
 145static int klp_find_callback(void *data, const char *name,
 146			     struct module *mod, unsigned long addr)
 147{
 148	struct klp_find_arg *args = data;
 149
 150	if ((mod && !args->objname) || (!mod && args->objname))
 151		return 0;
 152
 153	if (strcmp(args->name, name))
 154		return 0;
 155
 156	if (args->objname && strcmp(args->objname, mod->name))
 157		return 0;
 158
 159	args->addr = addr;
 160	args->count++;
 161
 162	/*
 163	 * Finish the search when the symbol is found for the desired position
 164	 * or the position is not defined for a non-unique symbol.
 165	 */
 166	if ((args->pos && (args->count == args->pos)) ||
 167	    (!args->pos && (args->count > 1)))
 168		return 1;
 169
 170	return 0;
 171}
 172
 173static int klp_find_object_symbol(const char *objname, const char *name,
 174				  unsigned long sympos, unsigned long *addr)
 175{
 176	struct klp_find_arg args = {
 177		.objname = objname,
 178		.name = name,
 179		.addr = 0,
 180		.count = 0,
 181		.pos = sympos,
 182	};
 183
 184	mutex_lock(&module_mutex);
 185	kallsyms_on_each_symbol(klp_find_callback, &args);
 186	mutex_unlock(&module_mutex);
 187
 188	/*
 189	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
 190	 * otherwise ensure the symbol position count matches sympos.
 191	 */
 192	if (args.addr == 0)
 193		pr_err("symbol '%s' not found in symbol table\n", name);
 194	else if (args.count > 1 && sympos == 0) {
 195		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
 196		       name, objname);
 197	} else if (sympos != args.count && sympos > 0) {
 198		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
 199		       sympos, name, objname ? objname : "vmlinux");
 200	} else {
 201		*addr = args.addr;
 202		return 0;
 203	}
 204
 205	*addr = 0;
 206	return -EINVAL;
 207}
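/*
 * Illustrative example of the sympos convention implemented above: if the
 * patched object contains two static copies of a symbol "foo", sympos=0
 * fails as ambiguous, sympos=1 selects the first occurrence in kallsyms
 * order and sympos=2 the second. For a unique symbol, sympos 0 (or 1) works.
 */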
 208
 209static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 210{
 211	int i, cnt, vmlinux, ret;
 212	char objname[MODULE_NAME_LEN];
 213	char symname[KSYM_NAME_LEN];
 214	char *strtab = pmod->core_kallsyms.strtab;
 215	Elf_Rela *relas;
 216	Elf_Sym *sym;
 217	unsigned long sympos, addr;
 218
 219	/*
 220	 * Since the field widths for objname and symname in the sscanf()
 221	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 222	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 223	 * and KSYM_NAME_LEN have the values we expect them to have.
 224	 *
 225	 * Because the value of MODULE_NAME_LEN can differ among architectures,
 226	 * we use the smallest/strictest upper bound possible (56, based on
 227	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 228	 */
 229	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
 230
 231	relas = (Elf_Rela *) relasec->sh_addr;
 232	/* For each rela in this klp relocation section */
 233	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 234		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
 235		if (sym->st_shndx != SHN_LIVEPATCH) {
  236			pr_err("symbol %s is not marked as a livepatch symbol\n",
 237			       strtab + sym->st_name);
 238			return -EINVAL;
 239		}
 240
 241		/* Format: .klp.sym.objname.symname,sympos */
 242		cnt = sscanf(strtab + sym->st_name,
 243			     ".klp.sym.%55[^.].%127[^,],%lu",
 244			     objname, symname, &sympos);
 245		if (cnt != 3) {
  246			pr_err("symbol %s has an incorrectly formatted name\n",
 247			       strtab + sym->st_name);
 248			return -EINVAL;
 249		}
 250
 251		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
 252		vmlinux = !strcmp(objname, "vmlinux");
 253		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
 254					     symname, sympos, &addr);
 255		if (ret)
 256			return ret;
 257
 258		sym->st_value = addr;
 259	}
 260
 261	return 0;
 262}
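/*
 * Hypothetical symbol names in the format parsed above
 * (.klp.sym.objname.symname,sympos):
 *
 *	.klp.sym.vmlinux.cmdline_proc_show,0	-> unique symbol in vmlinux
 *	.klp.sym.ext4.ext4_sync_fs,1		-> first occurrence in module ext4
 */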
 263
 264static int klp_write_object_relocations(struct module *pmod,
 265					struct klp_object *obj)
 266{
 267	int i, cnt, ret = 0;
 268	const char *objname, *secname;
 269	char sec_objname[MODULE_NAME_LEN];
 270	Elf_Shdr *sec;
 271
 272	if (WARN_ON(!klp_is_object_loaded(obj)))
 273		return -EINVAL;
 274
 275	objname = klp_is_module(obj) ? obj->name : "vmlinux";
 276
 277	/* For each klp relocation section */
 278	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
 279		sec = pmod->klp_info->sechdrs + i;
 280		secname = pmod->klp_info->secstrings + sec->sh_name;
 281		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
 282			continue;
 283
 284		/*
 285		 * Format: .klp.rela.sec_objname.section_name
 286		 * See comment in klp_resolve_symbols() for an explanation
 287		 * of the selected field width value.
 288		 */
 289		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
 290		if (cnt != 1) {
  291			pr_err("section %s has an incorrectly formatted name\n",
 292			       secname);
 293			ret = -EINVAL;
 294			break;
 295		}
 296
 297		if (strcmp(objname, sec_objname))
 298			continue;
 299
 300		ret = klp_resolve_symbols(sec, pmod);
 301		if (ret)
 302			break;
 303
 304		ret = apply_relocate_add(pmod->klp_info->sechdrs,
 305					 pmod->core_kallsyms.strtab,
 306					 pmod->klp_info->symndx, i, pmod);
 307		if (ret)
 308			break;
 309	}
 310
 311	return ret;
 312}
 313
 314static void notrace klp_ftrace_handler(unsigned long ip,
 315				       unsigned long parent_ip,
 316				       struct ftrace_ops *fops,
 317				       struct pt_regs *regs)
 318{
 319	struct klp_ops *ops;
 320	struct klp_func *func;
 321
 322	ops = container_of(fops, struct klp_ops, fops);
 323
 324	rcu_read_lock();
 325	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 326				      stack_node);
 327	if (WARN_ON_ONCE(!func))
 328		goto unlock;
 329
 330	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 331unlock:
 332	rcu_read_unlock();
 333}
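/*
 * Note (illustrative, not in the original source): klp_arch_set_pc()
 * rewrites the saved instruction pointer (e.g. regs->ip on x86_64), so when
 * the ftrace trampoline returns, execution resumes in func->new_func
 * instead of the original function.
 */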
 334
 335/*
 336 * Convert a function address into the appropriate ftrace location.
 337 *
 338 * Usually this is just the address of the function, but on some architectures
  339 * it's more complicated, so architectures are allowed to provide a custom behaviour.
 340 */
 341#ifndef klp_get_ftrace_location
 342static unsigned long klp_get_ftrace_location(unsigned long faddr)
 343{
 344	return faddr;
 345}
 346#endif
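/*
 * Sketch of an arch override (an assumption modeled on powerpc, where the
 * ftrace call sits within the first few instructions of a function):
 *
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */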
 347
 348static void klp_disable_func(struct klp_func *func)
 349{
 350	struct klp_ops *ops;
 351
 352	if (WARN_ON(func->state != KLP_ENABLED))
 353		return;
 354	if (WARN_ON(!func->old_addr))
 355		return;
 356
 357	ops = klp_find_ops(func->old_addr);
 358	if (WARN_ON(!ops))
 359		return;
 360
 361	if (list_is_singular(&ops->func_stack)) {
 362		unsigned long ftrace_loc;
 363
 364		ftrace_loc = klp_get_ftrace_location(func->old_addr);
 365		if (WARN_ON(!ftrace_loc))
 366			return;
 367
 368		WARN_ON(unregister_ftrace_function(&ops->fops));
 369		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
 370
 371		list_del_rcu(&func->stack_node);
 372		list_del(&ops->node);
 373		kfree(ops);
 374	} else {
 375		list_del_rcu(&func->stack_node);
 376	}
 377
 378	func->state = KLP_DISABLED;
 379}
 380
 381static int klp_enable_func(struct klp_func *func)
 382{
 383	struct klp_ops *ops;
 384	int ret;
 385
 386	if (WARN_ON(!func->old_addr))
 387		return -EINVAL;
 388
 389	if (WARN_ON(func->state != KLP_DISABLED))
 390		return -EINVAL;
 391
 392	ops = klp_find_ops(func->old_addr);
 393	if (!ops) {
 394		unsigned long ftrace_loc;
 395
 396		ftrace_loc = klp_get_ftrace_location(func->old_addr);
 397		if (!ftrace_loc) {
 398			pr_err("failed to find location for function '%s'\n",
 399				func->old_name);
 400			return -EINVAL;
 401		}
 402
 403		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 404		if (!ops)
 405			return -ENOMEM;
 406
 407		ops->fops.func = klp_ftrace_handler;
 408		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
 409				  FTRACE_OPS_FL_DYNAMIC |
 410				  FTRACE_OPS_FL_IPMODIFY;
 411
 412		list_add(&ops->node, &klp_ops);
 413
 414		INIT_LIST_HEAD(&ops->func_stack);
 415		list_add_rcu(&func->stack_node, &ops->func_stack);
 416
 417		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
 418		if (ret) {
 419			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 420			       func->old_name, ret);
 421			goto err;
 422		}
 423
 424		ret = register_ftrace_function(&ops->fops);
 425		if (ret) {
 426			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 427			       func->old_name, ret);
 428			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
 429			goto err;
 430		}
 431
 432
 433	} else {
 434		list_add_rcu(&func->stack_node, &ops->func_stack);
 435	}
 436
 437	func->state = KLP_ENABLED;
 438
 439	return 0;
 440
 441err:
 442	list_del_rcu(&func->stack_node);
 443	list_del(&ops->node);
 444	kfree(ops);
 445	return ret;
 446}
 447
 448static void klp_disable_object(struct klp_object *obj)
 449{
 450	struct klp_func *func;
 451
 452	klp_for_each_func(obj, func)
 453		if (func->state == KLP_ENABLED)
 454			klp_disable_func(func);
 455
 456	obj->state = KLP_DISABLED;
 457}
 458
 459static int klp_enable_object(struct klp_object *obj)
 460{
 461	struct klp_func *func;
 462	int ret;
 463
 464	if (WARN_ON(obj->state != KLP_DISABLED))
 465		return -EINVAL;
 466
 467	if (WARN_ON(!klp_is_object_loaded(obj)))
 468		return -EINVAL;
 469
 470	klp_for_each_func(obj, func) {
 471		ret = klp_enable_func(func);
 472		if (ret) {
 473			klp_disable_object(obj);
 474			return ret;
 475		}
 476	}
 477	obj->state = KLP_ENABLED;
 478
 479	return 0;
 480}
 481
 482static int __klp_disable_patch(struct klp_patch *patch)
 483{
 484	struct klp_object *obj;
 485
 486	/* enforce stacking: only the last enabled patch can be disabled */
 487	if (!list_is_last(&patch->list, &klp_patches) &&
 488	    list_next_entry(patch, list)->state == KLP_ENABLED)
 489		return -EBUSY;
 490
 491	pr_notice("disabling patch '%s'\n", patch->mod->name);
 492
 493	klp_for_each_object(patch, obj) {
 494		if (obj->state == KLP_ENABLED)
 495			klp_disable_object(obj);
 496	}
 497
 498	patch->state = KLP_DISABLED;
 499
 500	return 0;
 501}
 502
 503/**
 504 * klp_disable_patch() - disables a registered patch
 505 * @patch:	The registered, enabled patch to be disabled
 506 *
 507 * Unregisters the patched functions from ftrace.
 508 *
 509 * Return: 0 on success, otherwise error
 510 */
 511int klp_disable_patch(struct klp_patch *patch)
 512{
 513	int ret;
 514
 515	mutex_lock(&klp_mutex);
 516
 517	if (!klp_is_patch_registered(patch)) {
 518		ret = -EINVAL;
 519		goto err;
 520	}
 521
 522	if (patch->state == KLP_DISABLED) {
 523		ret = -EINVAL;
 524		goto err;
 525	}
 526
 527	ret = __klp_disable_patch(patch);
 528
 529err:
 530	mutex_unlock(&klp_mutex);
 531	return ret;
 532}
 533EXPORT_SYMBOL_GPL(klp_disable_patch);
 534
 535static int __klp_enable_patch(struct klp_patch *patch)
 536{
 537	struct klp_object *obj;
 538	int ret;
 539
 540	if (WARN_ON(patch->state != KLP_DISABLED))
 541		return -EINVAL;
 542
 543	/* enforce stacking: only the first disabled patch can be enabled */
 544	if (patch->list.prev != &klp_patches &&
 545	    list_prev_entry(patch, list)->state == KLP_DISABLED)
 546		return -EBUSY;
 547
 548	pr_notice("enabling patch '%s'\n", patch->mod->name);
 549
 550	klp_for_each_object(patch, obj) {
 551		if (!klp_is_object_loaded(obj))
 552			continue;
 553
 554		ret = klp_enable_object(obj);
 555		if (ret)
 556			goto unregister;
 557	}
 558
 559	patch->state = KLP_ENABLED;
 560
 561	return 0;
 562
 563unregister:
 564	WARN_ON(__klp_disable_patch(patch));
 565	return ret;
 566}
 567
 568/**
 569 * klp_enable_patch() - enables a registered patch
 570 * @patch:	The registered, disabled patch to be enabled
 571 *
 572 * Performs the needed symbol lookups and code relocations,
 573 * then registers the patched functions with ftrace.
 574 *
 575 * Return: 0 on success, otherwise error
 576 */
 577int klp_enable_patch(struct klp_patch *patch)
 578{
 579	int ret;
 580
 581	mutex_lock(&klp_mutex);
 582
 583	if (!klp_is_patch_registered(patch)) {
 584		ret = -EINVAL;
 585		goto err;
 586	}
 587
 588	ret = __klp_enable_patch(patch);
 589
 590err:
 591	mutex_unlock(&klp_mutex);
 592	return ret;
 593}
 594EXPORT_SYMBOL_GPL(klp_enable_patch);
 595
 596/*
 597 * Sysfs Interface
 598 *
 599 * /sys/kernel/livepatch
 600 * /sys/kernel/livepatch/<patch>
 601 * /sys/kernel/livepatch/<patch>/enabled
 602 * /sys/kernel/livepatch/<patch>/<object>
 603 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 604 */
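/*
 * Example usage from userspace (illustrative):
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	# disable the patch
 *	echo 1 > /sys/kernel/livepatch/<patch>/enabled	# enable it again
 */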
 605
 606static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 607			     const char *buf, size_t count)
 608{
 609	struct klp_patch *patch;
 610	int ret;
 611	unsigned long val;
 612
 613	ret = kstrtoul(buf, 10, &val);
 614	if (ret)
 615		return -EINVAL;
 616
 617	if (val != KLP_DISABLED && val != KLP_ENABLED)
 618		return -EINVAL;
 619
 620	patch = container_of(kobj, struct klp_patch, kobj);
 621
 622	mutex_lock(&klp_mutex);
 623
 624	if (val == patch->state) {
 625		/* already in requested state */
 626		ret = -EINVAL;
 627		goto err;
 628	}
 629
 630	if (val == KLP_ENABLED) {
 631		ret = __klp_enable_patch(patch);
 632		if (ret)
 633			goto err;
 634	} else {
 635		ret = __klp_disable_patch(patch);
 636		if (ret)
 637			goto err;
 638	}
 639
 640	mutex_unlock(&klp_mutex);
 641
 642	return count;
 643
 644err:
 645	mutex_unlock(&klp_mutex);
 646	return ret;
 647}
 648
 649static ssize_t enabled_show(struct kobject *kobj,
 650			    struct kobj_attribute *attr, char *buf)
 651{
 652	struct klp_patch *patch;
 653
 654	patch = container_of(kobj, struct klp_patch, kobj);
 655	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
 656}
 657
 658static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 659static struct attribute *klp_patch_attrs[] = {
 660	&enabled_kobj_attr.attr,
 661	NULL
 662};
 663
 664static void klp_kobj_release_patch(struct kobject *kobj)
 665{
 666	/*
 667	 * Once we have a consistency model we'll need to module_put() the
 668	 * patch module here.  See klp_register_patch() for more details.
 669	 */
 670}
 671
 672static struct kobj_type klp_ktype_patch = {
 673	.release = klp_kobj_release_patch,
 674	.sysfs_ops = &kobj_sysfs_ops,
 675	.default_attrs = klp_patch_attrs,
 676};
 677
 678static void klp_kobj_release_object(struct kobject *kobj)
 679{
 680}
 681
 682static struct kobj_type klp_ktype_object = {
 683	.release = klp_kobj_release_object,
 684	.sysfs_ops = &kobj_sysfs_ops,
 685};
 686
 687static void klp_kobj_release_func(struct kobject *kobj)
 688{
 689}
 690
 691static struct kobj_type klp_ktype_func = {
 692	.release = klp_kobj_release_func,
 693	.sysfs_ops = &kobj_sysfs_ops,
 694};
 695
 696/*
 697 * Free all functions' kobjects in the array up to some limit. When limit is
 698 * NULL, all kobjects are freed.
 699 */
 700static void klp_free_funcs_limited(struct klp_object *obj,
 701				   struct klp_func *limit)
 702{
 703	struct klp_func *func;
 704
 705	for (func = obj->funcs; func->old_name && func != limit; func++)
 706		kobject_put(&func->kobj);
 707}
 708
 709/* Clean up when a patched object is unloaded */
 710static void klp_free_object_loaded(struct klp_object *obj)
 711{
 712	struct klp_func *func;
 713
 714	obj->mod = NULL;
 715
 716	klp_for_each_func(obj, func)
 717		func->old_addr = 0;
 718}
 719
 720/*
 721 * Free all objects' kobjects in the array up to some limit. When limit is
 722 * NULL, all kobjects are freed.
 723 */
 724static void klp_free_objects_limited(struct klp_patch *patch,
 725				     struct klp_object *limit)
 726{
 727	struct klp_object *obj;
 728
 729	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
 730		klp_free_funcs_limited(obj, NULL);
 731		kobject_put(&obj->kobj);
 732	}
 733}
 734
 735static void klp_free_patch(struct klp_patch *patch)
 736{
 737	klp_free_objects_limited(patch, NULL);
 738	if (!list_empty(&patch->list))
 739		list_del(&patch->list);
 740	kobject_put(&patch->kobj);
 741}
 742
 743static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 744{
 745	if (!func->old_name || !func->new_func)
 746		return -EINVAL;
 747
 748	INIT_LIST_HEAD(&func->stack_node);
 749	func->state = KLP_DISABLED;
 750
 751	/* The format for the sysfs directory is <function,sympos> where sympos
 752	 * is the nth occurrence of this symbol in kallsyms for the patched
 753	 * object. If the user selects 0 for old_sympos, then 1 will be used
 754	 * since a unique symbol will be the first occurrence.
 755	 */
 756	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
 757				    &obj->kobj, "%s,%lu", func->old_name,
 758				    func->old_sympos ? func->old_sympos : 1);
 759}
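/*
 * Illustrative resulting path (hypothetical patch and function names):
 *
 *	/sys/kernel/livepatch/livepatch_sample/vmlinux/cmdline_proc_show,1
 */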
 760
 761/* Arches may override this to finish any remaining arch-specific tasks */
 762void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
 763					struct klp_object *obj)
 764{
 765}
 766
  767/* parts of the initialization that are done only when the object is loaded */
 768static int klp_init_object_loaded(struct klp_patch *patch,
 769				  struct klp_object *obj)
 770{
 771	struct klp_func *func;
 772	int ret;
 773
 774	module_disable_ro(patch->mod);
 775	ret = klp_write_object_relocations(patch->mod, obj);
 776	if (ret) {
 777		module_enable_ro(patch->mod, true);
 778		return ret;
 779	}
 780
 781	arch_klp_init_object_loaded(patch, obj);
 782	module_enable_ro(patch->mod, true);
 783
 784	klp_for_each_func(obj, func) {
 785		ret = klp_find_object_symbol(obj->name, func->old_name,
 786					     func->old_sympos,
 787					     &func->old_addr);
 788		if (ret)
 789			return ret;
 790	}
 791
 792	return 0;
 793}
 794
 795static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 796{
 797	struct klp_func *func;
 798	int ret;
 799	const char *name;
 800
 801	if (!obj->funcs)
 802		return -EINVAL;
 803
 804	obj->state = KLP_DISABLED;
 805	obj->mod = NULL;
 806
 807	klp_find_object_module(obj);
 808
 809	name = klp_is_module(obj) ? obj->name : "vmlinux";
 810	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
 811				   &patch->kobj, "%s", name);
 812	if (ret)
 813		return ret;
 814
 815	klp_for_each_func(obj, func) {
 816		ret = klp_init_func(obj, func);
 817		if (ret)
 818			goto free;
 819	}
 820
 821	if (klp_is_object_loaded(obj)) {
 822		ret = klp_init_object_loaded(patch, obj);
 823		if (ret)
 824			goto free;
 825	}
 826
 827	return 0;
 828
 829free:
 830	klp_free_funcs_limited(obj, func);
 831	kobject_put(&obj->kobj);
 832	return ret;
 833}
 834
 835static int klp_init_patch(struct klp_patch *patch)
 836{
 837	struct klp_object *obj;
 838	int ret;
 839
 840	if (!patch->objs)
 841		return -EINVAL;
 842
 843	mutex_lock(&klp_mutex);
 844
 845	patch->state = KLP_DISABLED;
 846
 847	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
 848				   klp_root_kobj, "%s", patch->mod->name);
 849	if (ret)
 850		goto unlock;
 851
 852	klp_for_each_object(patch, obj) {
 853		ret = klp_init_object(patch, obj);
 854		if (ret)
 855			goto free;
 856	}
 857
 858	list_add_tail(&patch->list, &klp_patches);
 859
 860	mutex_unlock(&klp_mutex);
 861
 862	return 0;
 863
 864free:
 865	klp_free_objects_limited(patch, obj);
 866	kobject_put(&patch->kobj);
 867unlock:
 868	mutex_unlock(&klp_mutex);
 869	return ret;
 870}
 871
 872/**
 873 * klp_unregister_patch() - unregisters a patch
 874 * @patch:	Disabled patch to be unregistered
 875 *
 876 * Frees the data structures and removes the sysfs interface.
 877 *
 878 * Return: 0 on success, otherwise error
 879 */
 880int klp_unregister_patch(struct klp_patch *patch)
 881{
 882	int ret = 0;
 883
 884	mutex_lock(&klp_mutex);
 885
 886	if (!klp_is_patch_registered(patch)) {
 887		ret = -EINVAL;
 888		goto out;
 889	}
 890
 891	if (patch->state == KLP_ENABLED) {
 892		ret = -EBUSY;
 893		goto out;
 894	}
 895
 896	klp_free_patch(patch);
 897
 898out:
 899	mutex_unlock(&klp_mutex);
 900	return ret;
 901}
 902EXPORT_SYMBOL_GPL(klp_unregister_patch);
 903
 904/**
 905 * klp_register_patch() - registers a patch
 906 * @patch:	Patch to be registered
 907 *
 908 * Initializes the data structure associated with the patch and
 909 * creates the sysfs interface.
 910 *
 911 * Return: 0 on success, otherwise error
 912 */
 913int klp_register_patch(struct klp_patch *patch)
 914{
 915	int ret;
 916
 917	if (!patch || !patch->mod)
 918		return -EINVAL;
 919
 920	if (!is_livepatch_module(patch->mod)) {
  921		pr_err("module %s is not marked as a livepatch module\n",
 922		       patch->mod->name);
 923		return -EINVAL;
 924	}
 925
 926	if (!klp_initialized())
 927		return -ENODEV;
 928
 929	/*
 930	 * A reference is taken on the patch module to prevent it from being
 931	 * unloaded.  Right now, we don't allow patch modules to unload since
 932	 * there is currently no method to determine if a thread is still
 933	 * running in the patched code contained in the patch module once
 934	 * the ftrace registration is successful.
 935	 */
 936	if (!try_module_get(patch->mod))
 937		return -ENODEV;
 938
 939	ret = klp_init_patch(patch);
 940	if (ret)
 941		module_put(patch->mod);
 942
 943	return ret;
 944}
 945EXPORT_SYMBOL_GPL(klp_register_patch);
 946
 947int klp_module_coming(struct module *mod)
 948{
 949	int ret;
 950	struct klp_patch *patch;
 951	struct klp_object *obj;
 952
 953	if (WARN_ON(mod->state != MODULE_STATE_COMING))
 954		return -EINVAL;
 955
 956	mutex_lock(&klp_mutex);
 957	/*
 958	 * Each module has to know that klp_module_coming()
 959	 * has been called. We never know what module will
 960	 * get patched by a new patch.
 961	 */
 962	mod->klp_alive = true;
 963
 964	list_for_each_entry(patch, &klp_patches, list) {
 965		klp_for_each_object(patch, obj) {
 966			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 967				continue;
 968
 969			obj->mod = mod;
 970
 971			ret = klp_init_object_loaded(patch, obj);
 972			if (ret) {
 973				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
 974					patch->mod->name, obj->mod->name, ret);
 975				goto err;
 976			}
 977
 978			if (patch->state == KLP_DISABLED)
 979				break;
 980
 981			pr_notice("applying patch '%s' to loading module '%s'\n",
 982				  patch->mod->name, obj->mod->name);
 983
 984			ret = klp_enable_object(obj);
 985			if (ret) {
 986				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
 987					patch->mod->name, obj->mod->name, ret);
 988				goto err;
 989			}
 990
 991			break;
 992		}
 993	}
 994
 995	mutex_unlock(&klp_mutex);
 996
 997	return 0;
 998
 999err:
1000	/*
1001	 * If a patch is unsuccessfully applied, return
1002	 * error to the module loader.
1003	 */
1004	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1005		patch->mod->name, obj->mod->name, obj->mod->name);
1006	mod->klp_alive = false;
1007	klp_free_object_loaded(obj);
1008	mutex_unlock(&klp_mutex);
1009
1010	return ret;
1011}
1012
1013void klp_module_going(struct module *mod)
1014{
1015	struct klp_patch *patch;
1016	struct klp_object *obj;
1017
1018	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1019		    mod->state != MODULE_STATE_COMING))
1020		return;
1021
1022	mutex_lock(&klp_mutex);
1023	/*
1024	 * Each module has to know that klp_module_going()
1025	 * has been called. We never know what module will
1026	 * get patched by a new patch.
1027	 */
1028	mod->klp_alive = false;
1029
1030	list_for_each_entry(patch, &klp_patches, list) {
1031		klp_for_each_object(patch, obj) {
1032			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1033				continue;
1034
1035			if (patch->state != KLP_DISABLED) {
1036				pr_notice("reverting patch '%s' on unloading module '%s'\n",
1037					  patch->mod->name, obj->mod->name);
1038				klp_disable_object(obj);
1039			}
1040
1041			klp_free_object_loaded(obj);
1042			break;
1043		}
1044	}
1045
1046	mutex_unlock(&klp_mutex);
1047}
1048
1049static int __init klp_init(void)
1050{
1051	int ret;
1052
1053	ret = klp_check_compiler_support();
1054	if (ret) {
1055		pr_info("Your compiler is too old; turning off.\n");
1056		return -EINVAL;
1057	}
1058
1059	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1060	if (!klp_root_kobj)
1061		return -ENOMEM;
1062
1063	return 0;
1064}
1065
1066module_init(klp_init);
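/*
 * A minimal consumer of the v4.10 API above, closely modeled on
 * samples/livepatch/livepatch-sample.c (a sketch, not part of core.c):
 * it replaces cmdline_proc_show() in vmlinux.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* New implementation that the livepatch redirects callers to. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	/* Register first, then enable; both come from the core above. */
	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	/* The patch must have been disabled via sysfs before this point. */
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");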
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * core.c - Kernel Live Patching Core
   4 *
   5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   6 * Copyright (C) 2014 SUSE
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/mutex.h>
  14#include <linux/slab.h>
  15#include <linux/list.h>
  16#include <linux/kallsyms.h>
  17#include <linux/livepatch.h>
  18#include <linux/elf.h>
  19#include <linux/moduleloader.h>
  20#include <linux/completion.h>
  21#include <linux/memory.h>
  22#include <linux/rcupdate.h>
  23#include <asm/cacheflush.h>
  24#include "core.h"
  25#include "patch.h"
  26#include "state.h"
  27#include "transition.h"
  28
  29/*
  30 * klp_mutex is a coarse lock which serializes access to klp data.  All
  31 * accesses to klp-related variables and structures must have mutex protection,
  32 * except within the following functions which carefully avoid the need for it:
  33 *
  34 * - klp_ftrace_handler()
  35 * - klp_update_patch_state()
  36 */
  37DEFINE_MUTEX(klp_mutex);
  38
  39/*
  40 * Actively used patches: enabled or in transition. Note that replaced
  41 * or disabled patches are not listed even though the related kernel
   42 * module can still be loaded.
  43 */
  44LIST_HEAD(klp_patches);
  45
  46static struct kobject *klp_root_kobj;
  47
  48static bool klp_is_module(struct klp_object *obj)
  49{
  50	return obj->name;
  51}
  52
  53/* sets obj->mod if object is not vmlinux and module is found */
  54static void klp_find_object_module(struct klp_object *obj)
  55{
  56	struct module *mod;
  57
  58	if (!klp_is_module(obj))
  59		return;
  60
  61	rcu_read_lock_sched();
  62	/*
  63	 * We do not want to block removal of patched modules and therefore
  64	 * we do not take a reference here. The patches are removed by
  65	 * klp_module_going() instead.
  66	 */
  67	mod = find_module(obj->name);
  68	/*
   69	 * Do not interfere with the work of klp_module_coming() and klp_module_going().
  70	 * Note that the patch might still be needed before klp_module_going()
  71	 * is called. Module functions can be called even in the GOING state
  72	 * until mod->exit() finishes. This is especially important for
   73	 * patches that modify the semantics of the functions.
  74	 */
  75	if (mod && mod->klp_alive)
  76		obj->mod = mod;
  77
  78	rcu_read_unlock_sched();
  79}
  80
  81static bool klp_initialized(void)
  82{
  83	return !!klp_root_kobj;
  84}
  85
  86static struct klp_func *klp_find_func(struct klp_object *obj,
  87				      struct klp_func *old_func)
  88{
  89	struct klp_func *func;
  90
  91	klp_for_each_func(obj, func) {
  92		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
  93		    (old_func->old_sympos == func->old_sympos)) {
  94			return func;
  95		}
  96	}
  97
  98	return NULL;
  99}
 100
 101static struct klp_object *klp_find_object(struct klp_patch *patch,
 102					  struct klp_object *old_obj)
 103{
 104	struct klp_object *obj;
 105
 106	klp_for_each_object(patch, obj) {
 107		if (klp_is_module(old_obj)) {
 108			if (klp_is_module(obj) &&
 109			    strcmp(old_obj->name, obj->name) == 0) {
 110				return obj;
 111			}
 112		} else if (!klp_is_module(obj)) {
 113			return obj;
 114		}
 115	}
 116
 117	return NULL;
 118}
 119
 120struct klp_find_arg {
 121	const char *objname;
 122	const char *name;
 123	unsigned long addr;
 124	unsigned long count;
 125	unsigned long pos;
 126};
 127
 128static int klp_find_callback(void *data, const char *name,
 129			     struct module *mod, unsigned long addr)
 130{
 131	struct klp_find_arg *args = data;
 132
 133	if ((mod && !args->objname) || (!mod && args->objname))
 134		return 0;
 135
 136	if (strcmp(args->name, name))
 137		return 0;
 138
 139	if (args->objname && strcmp(args->objname, mod->name))
 140		return 0;
 141
 142	args->addr = addr;
 143	args->count++;
 144
 145	/*
 146	 * Finish the search when the symbol is found for the desired position
 147	 * or the position is not defined for a non-unique symbol.
 148	 */
 149	if ((args->pos && (args->count == args->pos)) ||
 150	    (!args->pos && (args->count > 1)))
 151		return 1;
 152
 153	return 0;
 154}
 155
 156static int klp_find_object_symbol(const char *objname, const char *name,
 157				  unsigned long sympos, unsigned long *addr)
 158{
 159	struct klp_find_arg args = {
 160		.objname = objname,
 161		.name = name,
 162		.addr = 0,
 163		.count = 0,
 164		.pos = sympos,
 165	};
 166
 167	if (objname)
 168		module_kallsyms_on_each_symbol(klp_find_callback, &args);
 169	else
 170		kallsyms_on_each_symbol(klp_find_callback, &args);
 171
 172	/*
 173	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
 174	 * otherwise ensure the symbol position count matches sympos.
 175	 */
 176	if (args.addr == 0)
 177		pr_err("symbol '%s' not found in symbol table\n", name);
 178	else if (args.count > 1 && sympos == 0) {
 179		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
 180		       name, objname);
 181	} else if (sympos != args.count && sympos > 0) {
 182		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
 183		       sympos, name, objname ? objname : "vmlinux");
 184	} else {
 185		*addr = args.addr;
 186		return 0;
 187	}
 188
 189	*addr = 0;
 190	return -EINVAL;
 191}
 192
 193static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
 194			       unsigned int symndx, Elf_Shdr *relasec,
 195			       const char *sec_objname)
 196{
 197	int i, cnt, ret;
 198	char sym_objname[MODULE_NAME_LEN];
 199	char sym_name[KSYM_NAME_LEN];
 200	Elf_Rela *relas;
 201	Elf_Sym *sym;
 202	unsigned long sympos, addr;
 203	bool sym_vmlinux;
 204	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
 205
 206	/*
 207	 * Since the field widths for sym_objname and sym_name in the sscanf()
 208	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 209	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 210	 * and KSYM_NAME_LEN have the values we expect them to have.
 211	 *
 212	 * Because the value of MODULE_NAME_LEN can differ among architectures,
 213	 * we use the smallest/strictest upper bound possible (56, based on
 214	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 215	 */
 216	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
 217
 218	relas = (Elf_Rela *) relasec->sh_addr;
 219	/* For each rela in this klp relocation section */
 220	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 221		sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
 222		if (sym->st_shndx != SHN_LIVEPATCH) {
 223			pr_err("symbol %s is not marked as a livepatch symbol\n",
 224			       strtab + sym->st_name);
 225			return -EINVAL;
 226		}
 227
 228		/* Format: .klp.sym.sym_objname.sym_name,sympos */
 229		cnt = sscanf(strtab + sym->st_name,
 230			     ".klp.sym.%55[^.].%127[^,],%lu",
 231			     sym_objname, sym_name, &sympos);
 232		if (cnt != 3) {
 233			pr_err("symbol %s has an incorrectly formatted name\n",
 234			       strtab + sym->st_name);
 235			return -EINVAL;
 236		}
 237
 238		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
 239
 240		/*
 241		 * Prevent module-specific KLP rela sections from referencing
 242		 * vmlinux symbols.  This helps prevent ordering issues with
 243		 * module special section initializations.  Presumably such
 244		 * symbols are exported and normal relas can be used instead.
 245		 */
 246		if (!sec_vmlinux && sym_vmlinux) {
  247			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
 248			       sym_name);
 249			return -EINVAL;
 250		}
 251
 252		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
 253		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
 254					     sym_name, sympos, &addr);
 255		if (ret)
 256			return ret;
 257
 258		sym->st_value = addr;
 259	}
 260
 261	return 0;
 262}
 263
 264/*
 265 * At a high-level, there are two types of klp relocation sections: those which
 266 * reference symbols which live in vmlinux; and those which reference symbols
 267 * which live in other modules.  This function is called for both types:
 268 *
 269 * 1) When a klp module itself loads, the module code calls this function to
 270 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 271 *    These relocations are written to the klp module text to allow the patched
 272 *    code/data to reference unexported vmlinux symbols.  They're written as
  273 *    early as possible to ensure that other module init code (e.g.,
 274 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 275 *    might be referenced by the klp module's special sections.
 276 *
 277 * 2) When a to-be-patched module loads -- or is already loaded when a
 278 *    corresponding klp module loads -- klp code calls this function to write
 279 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 280 *    are written to the klp module text to allow the patched code/data to
 281 *    reference symbols which live in the to-be-patched module or one of its
 282 *    module dependencies.  Exported symbols are supported, in addition to
 283 *    unexported symbols, in order to enable late module patching, which allows
 284 *    the to-be-patched module to be loaded and patched sometime *after* the
 285 *    klp module is loaded.
 286 */
 287int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
 288			     const char *shstrtab, const char *strtab,
 289			     unsigned int symndx, unsigned int secndx,
 290			     const char *objname)
 291{
 292	int cnt, ret;
 293	char sec_objname[MODULE_NAME_LEN];
 294	Elf_Shdr *sec = sechdrs + secndx;
 295
 296	/*
 297	 * Format: .klp.rela.sec_objname.section_name
 298	 * See comment in klp_resolve_symbols() for an explanation
 299	 * of the selected field width value.
 300	 */
 301	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
 302		     sec_objname);
 303	if (cnt != 1) {
 304		pr_err("section %s has an incorrectly formatted name\n",
 305		       shstrtab + sec->sh_name);
 306		return -EINVAL;
 307	}
 308
 309	if (strcmp(objname ? objname : "vmlinux", sec_objname))
 310		return 0;
 311
 312	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
 313	if (ret)
 314		return ret;
 315
 316	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
 317}
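/*
 * Hypothetical section names in the format handled above:
 *
 *	.klp.rela.vmlinux.<section>	-> applied when the klp module itself loads
 *	.klp.rela.ext4.<section>	-> applied once ext4 is (or becomes) loaded
 */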
 318
 319/*
 320 * Sysfs Interface
 321 *
 322 * /sys/kernel/livepatch
 323 * /sys/kernel/livepatch/<patch>
 324 * /sys/kernel/livepatch/<patch>/enabled
 325 * /sys/kernel/livepatch/<patch>/transition
 326 * /sys/kernel/livepatch/<patch>/force
 327 * /sys/kernel/livepatch/<patch>/<object>
 328 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 329 */
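/*
 * Example usage from userspace (illustrative):
 *
 *	cat /sys/kernel/livepatch/<patch>/transition	# 1 while a transition is in progress
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	# disable, or reverse a pending enable
 *	echo 1 > /sys/kernel/livepatch/<patch>/force	# force a stuck transition (last resort)
 */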
 330static int __klp_disable_patch(struct klp_patch *patch);
 331
 332static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 333			     const char *buf, size_t count)
 334{
 335	struct klp_patch *patch;
 336	int ret;
 337	bool enabled;
 338
 339	ret = kstrtobool(buf, &enabled);
 340	if (ret)
 341		return ret;
 342
 343	patch = container_of(kobj, struct klp_patch, kobj);
 344
 345	mutex_lock(&klp_mutex);
 346
 347	if (patch->enabled == enabled) {
 348		/* already in requested state */
 349		ret = -EINVAL;
 350		goto out;
 351	}
 352
 353	/*
  354	 * Allow reversing a pending transition in either direction. It might be
 355	 * necessary to complete the transition without forcing and breaking
 356	 * the system integrity.
 357	 *
  358	 * Do not allow re-enabling a disabled patch.
 359	 */
 360	if (patch == klp_transition_patch)
 361		klp_reverse_transition();
 362	else if (!enabled)
 363		ret = __klp_disable_patch(patch);
 364	else
 365		ret = -EINVAL;
 366
 367out:
 368	mutex_unlock(&klp_mutex);
 369
 370	if (ret)
 371		return ret;
 372	return count;
 373}
 374
 375static ssize_t enabled_show(struct kobject *kobj,
 376			    struct kobj_attribute *attr, char *buf)
 377{
 378	struct klp_patch *patch;
 379
 380	patch = container_of(kobj, struct klp_patch, kobj);
 381	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
 382}
 383
 384static ssize_t transition_show(struct kobject *kobj,
 385			       struct kobj_attribute *attr, char *buf)
 386{
 387	struct klp_patch *patch;
 388
 389	patch = container_of(kobj, struct klp_patch, kobj);
 390	return snprintf(buf, PAGE_SIZE-1, "%d\n",
 391			patch == klp_transition_patch);
 392}
 393
 394static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 395			   const char *buf, size_t count)
 396{
 397	struct klp_patch *patch;
 398	int ret;
 399	bool val;
 400
 401	ret = kstrtobool(buf, &val);
 402	if (ret)
 403		return ret;
 404
 405	if (!val)
 406		return count;
 407
 408	mutex_lock(&klp_mutex);
 409
 410	patch = container_of(kobj, struct klp_patch, kobj);
 411	if (patch != klp_transition_patch) {
 412		mutex_unlock(&klp_mutex);
 413		return -EINVAL;
 414	}
 415
 416	klp_force_transition();
 417
 418	mutex_unlock(&klp_mutex);
 419
 420	return count;
 421}
 422
 423static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 424static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 425static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 426static struct attribute *klp_patch_attrs[] = {
 427	&enabled_kobj_attr.attr,
 428	&transition_kobj_attr.attr,
 429	&force_kobj_attr.attr,
 430	NULL
 431};
 432ATTRIBUTE_GROUPS(klp_patch);
 433
 434static void klp_free_object_dynamic(struct klp_object *obj)
 435{
 436	kfree(obj->name);
 437	kfree(obj);
 438}
 439
 440static void klp_init_func_early(struct klp_object *obj,
 441				struct klp_func *func);
 442static void klp_init_object_early(struct klp_patch *patch,
 443				  struct klp_object *obj);
 444
 445static struct klp_object *klp_alloc_object_dynamic(const char *name,
 446						   struct klp_patch *patch)
 447{
 448	struct klp_object *obj;
 449
 450	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 451	if (!obj)
 452		return NULL;
 453
 454	if (name) {
 455		obj->name = kstrdup(name, GFP_KERNEL);
 456		if (!obj->name) {
 457			kfree(obj);
 458			return NULL;
 459		}
 460	}
 461
 462	klp_init_object_early(patch, obj);
 463	obj->dynamic = true;
 464
 465	return obj;
 466}
 467
 468static void klp_free_func_nop(struct klp_func *func)
 469{
 470	kfree(func->old_name);
 471	kfree(func);
 472}
 473
 474static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
 475					   struct klp_object *obj)
 476{
 477	struct klp_func *func;
 478
 479	func = kzalloc(sizeof(*func), GFP_KERNEL);
 480	if (!func)
 481		return NULL;
 482
 483	if (old_func->old_name) {
 484		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
 485		if (!func->old_name) {
 486			kfree(func);
 487			return NULL;
 488		}
 489	}
 490
 491	klp_init_func_early(obj, func);
 492	/*
 493	 * func->new_func is same as func->old_func. These addresses are
 494	 * set when the object is loaded, see klp_init_object_loaded().
 495	 */
 496	func->old_sympos = old_func->old_sympos;
 497	func->nop = true;
 498
 499	return func;
 500}
 501
 502static int klp_add_object_nops(struct klp_patch *patch,
 503			       struct klp_object *old_obj)
 504{
 505	struct klp_object *obj;
 506	struct klp_func *func, *old_func;
 507
 508	obj = klp_find_object(patch, old_obj);
 509
 510	if (!obj) {
 511		obj = klp_alloc_object_dynamic(old_obj->name, patch);
 512		if (!obj)
 513			return -ENOMEM;
 514	}
 515
 516	klp_for_each_func(old_obj, old_func) {
 517		func = klp_find_func(obj, old_func);
 518		if (func)
 519			continue;
 520
 521		func = klp_alloc_func_nop(old_func, obj);
 522		if (!func)
 523			return -ENOMEM;
 524	}
 525
 526	return 0;
 527}
 528
 529/*
 530 * Add 'nop' functions which simply return to the caller to run
 531 * the original function. The 'nop' functions are added to a
 532 * patch to facilitate a 'replace' mode.
 533 */
 534static int klp_add_nops(struct klp_patch *patch)
 535{
 536	struct klp_patch *old_patch;
 537	struct klp_object *old_obj;
 538
 539	klp_for_each_patch(old_patch) {
 540		klp_for_each_object(old_patch, old_obj) {
 541			int err;
 542
 543			err = klp_add_object_nops(patch, old_obj);
 544			if (err)
 545				return err;
 546		}
 547	}
 548
 549	return 0;
 550}
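/*
 * Illustrative scenario: an installed patch modifies foo() and bar(); a new
 * cumulative patch (patch->replace set) modifies only foo(). klp_add_nops()
 * adds a nop entry for bar(), so the transition to the new patch atomically
 * reverts bar() to its original implementation.
 */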
 551
 552static void klp_kobj_release_patch(struct kobject *kobj)
 553{
 554	struct klp_patch *patch;
 555
 556	patch = container_of(kobj, struct klp_patch, kobj);
 557	complete(&patch->finish);
 558}
 559
 560static struct kobj_type klp_ktype_patch = {
 561	.release = klp_kobj_release_patch,
 562	.sysfs_ops = &kobj_sysfs_ops,
 563	.default_groups = klp_patch_groups,
 564};
 565
 566static void klp_kobj_release_object(struct kobject *kobj)
 567{
 568	struct klp_object *obj;
 569
 570	obj = container_of(kobj, struct klp_object, kobj);
 571
 572	if (obj->dynamic)
 573		klp_free_object_dynamic(obj);
 574}
 575
 576static struct kobj_type klp_ktype_object = {
 577	.release = klp_kobj_release_object,
 578	.sysfs_ops = &kobj_sysfs_ops,
 579};
 580
 581static void klp_kobj_release_func(struct kobject *kobj)
 582{
 583	struct klp_func *func;
 584
 585	func = container_of(kobj, struct klp_func, kobj);
 586
 587	if (func->nop)
 588		klp_free_func_nop(func);
 589}
 590
 591static struct kobj_type klp_ktype_func = {
 592	.release = klp_kobj_release_func,
 593	.sysfs_ops = &kobj_sysfs_ops,
 594};
 595
 596static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
 597{
 598	struct klp_func *func, *tmp_func;
 599
 600	klp_for_each_func_safe(obj, func, tmp_func) {
 601		if (nops_only && !func->nop)
 602			continue;
 603
 604		list_del(&func->node);
 605		kobject_put(&func->kobj);
 606	}
 607}
 608
 609/* Clean up when a patched object is unloaded */
 610static void klp_free_object_loaded(struct klp_object *obj)
 611{
 612	struct klp_func *func;
 613
 614	obj->mod = NULL;
 615
 616	klp_for_each_func(obj, func) {
 617		func->old_func = NULL;
 618
 619		if (func->nop)
 620			func->new_func = NULL;
 621	}
 622}
 623
 624static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
 625{
 626	struct klp_object *obj, *tmp_obj;
 627
 628	klp_for_each_object_safe(patch, obj, tmp_obj) {
 629		__klp_free_funcs(obj, nops_only);
 630
 631		if (nops_only && !obj->dynamic)
 632			continue;
 633
 634		list_del(&obj->node);
 635		kobject_put(&obj->kobj);
 636	}
 637}
 638
 639static void klp_free_objects(struct klp_patch *patch)
 640{
 641	__klp_free_objects(patch, false);
 642}
 643
 644static void klp_free_objects_dynamic(struct klp_patch *patch)
 645{
 646	__klp_free_objects(patch, true);
 647}
 648
 649/*
 650 * This function implements the free operations that can be called safely
 651 * under klp_mutex.
 652 *
 653 * The operation must be completed by calling klp_free_patch_finish()
 654 * outside klp_mutex.
 655 */
 656static void klp_free_patch_start(struct klp_patch *patch)
 657{
 658	if (!list_empty(&patch->list))
 659		list_del(&patch->list);
 660
 661	klp_free_objects(patch);
 662}
 663
 664/*
 665 * This function implements the free part that must be called outside
 666 * klp_mutex.
 667 *
 668 * It must be called after klp_free_patch_start(). And it has to be
 669 * the last function accessing the livepatch structures when the patch
 670 * gets disabled.
 671 */
 672static void klp_free_patch_finish(struct klp_patch *patch)
 673{
 674	/*
 675	 * Avoid deadlock with enabled_store() sysfs callback by
 676	 * calling this outside klp_mutex. It is safe because
 677	 * this is called when the patch gets disabled and it
 678	 * cannot get enabled again.
 679	 */
 680	kobject_put(&patch->kobj);
 681	wait_for_completion(&patch->finish);
 682
 683	/* Put the module after the last access to struct klp_patch. */
 684	if (!patch->forced)
 685		module_put(patch->mod);
 686}
 687
 688/*
  689 * The livepatch might be freed from the sysfs interface created by the patch.
  690 * This work item allows us to wait until the interface is destroyed in a separate
 691 * context.
 692 */
 693static void klp_free_patch_work_fn(struct work_struct *work)
 694{
 695	struct klp_patch *patch =
 696		container_of(work, struct klp_patch, free_work);
 697
 698	klp_free_patch_finish(patch);
 699}
 700
 701void klp_free_patch_async(struct klp_patch *patch)
 702{
 703	klp_free_patch_start(patch);
 704	schedule_work(&patch->free_work);
 705}
 706
 707void klp_free_replaced_patches_async(struct klp_patch *new_patch)
 708{
 709	struct klp_patch *old_patch, *tmp_patch;
 710
 711	klp_for_each_patch_safe(old_patch, tmp_patch) {
 712		if (old_patch == new_patch)
 713			return;
 714		klp_free_patch_async(old_patch);
 715	}
 716}
 717
 718static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 719{
 720	if (!func->old_name)
 721		return -EINVAL;
 722
 723	/*
 724	 * NOPs get the address later. The patched module must be loaded,
 725	 * see klp_init_object_loaded().
 726	 */
 727	if (!func->new_func && !func->nop)
 728		return -EINVAL;
 729
 730	if (strlen(func->old_name) >= KSYM_NAME_LEN)
 731		return -EINVAL;
 732
 733	INIT_LIST_HEAD(&func->stack_node);
 734	func->patched = false;
 735	func->transition = false;
 736
 737	/* The format for the sysfs directory is <function,sympos> where sympos
 738	 * is the nth occurrence of this symbol in kallsyms for the patched
 739	 * object. If the user selects 0 for old_sympos, then 1 will be used
 740	 * since a unique symbol will be the first occurrence.
 741	 */
 742	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
 743			   func->old_name,
 744			   func->old_sympos ? func->old_sympos : 1);
 745}
 746
 747static int klp_apply_object_relocs(struct klp_patch *patch,
 748				   struct klp_object *obj)
 749{
 750	int i, ret;
 751	struct klp_modinfo *info = patch->mod->klp_info;
 752
 753	for (i = 1; i < info->hdr.e_shnum; i++) {
 754		Elf_Shdr *sec = info->sechdrs + i;
 755
 756		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
 757			continue;
 758
 759		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
 760					       info->secstrings,
 761					       patch->mod->core_kallsyms.strtab,
 762					       info->symndx, i, obj->name);
 763		if (ret)
 764			return ret;
 765	}
 766
 767	return 0;
 768}
 769
  770/* parts of the initialization that are done only when the object is loaded */
 771static int klp_init_object_loaded(struct klp_patch *patch,
 772				  struct klp_object *obj)
 773{
 774	struct klp_func *func;
 775	int ret;
 776
 777	if (klp_is_module(obj)) {
 778		/*
 779		 * Only write module-specific relocations here
 780		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
 781		 * written earlier during the initialization of the klp module
 782		 * itself.
 783		 */
 784		ret = klp_apply_object_relocs(patch, obj);
 785		if (ret)
 786			return ret;
 787	}
 788
 789	klp_for_each_func(obj, func) {
 790		ret = klp_find_object_symbol(obj->name, func->old_name,
 791					     func->old_sympos,
 792					     (unsigned long *)&func->old_func);
 793		if (ret)
 794			return ret;
 795
 796		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
 797						  &func->old_size, NULL);
 798		if (!ret) {
 799			pr_err("kallsyms size lookup failed for '%s'\n",
 800			       func->old_name);
 801			return -ENOENT;
 802		}
 803
 804		if (func->nop)
 805			func->new_func = func->old_func;
 806
 807		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
 808						  &func->new_size, NULL);
 809		if (!ret) {
 810			pr_err("kallsyms size lookup failed for '%s' replacement\n",
 811			       func->old_name);
 812			return -ENOENT;
 813		}
 814	}
 815
 816	return 0;
 817}
 818
 819static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 820{
 821	struct klp_func *func;
 822	int ret;
 823	const char *name;
 824
 825	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
 826		return -EINVAL;
 827
 828	obj->patched = false;
 829	obj->mod = NULL;
 830
 831	klp_find_object_module(obj);
 832
 833	name = klp_is_module(obj) ? obj->name : "vmlinux";
 834	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
 835	if (ret)
 836		return ret;
 837
 838	klp_for_each_func(obj, func) {
 839		ret = klp_init_func(obj, func);
 840		if (ret)
 841			return ret;
 842	}
 843
 844	if (klp_is_object_loaded(obj))
 845		ret = klp_init_object_loaded(patch, obj);
 846
 847	return ret;
 848}
 849
 850static void klp_init_func_early(struct klp_object *obj,
 851				struct klp_func *func)
 852{
 853	kobject_init(&func->kobj, &klp_ktype_func);
 854	list_add_tail(&func->node, &obj->func_list);
 855}
 856
 857static void klp_init_object_early(struct klp_patch *patch,
 858				  struct klp_object *obj)
 859{
 860	INIT_LIST_HEAD(&obj->func_list);
 861	kobject_init(&obj->kobj, &klp_ktype_object);
 862	list_add_tail(&obj->node, &patch->obj_list);
 863}
 864
 865static int klp_init_patch_early(struct klp_patch *patch)
 866{
 867	struct klp_object *obj;
 868	struct klp_func *func;
 869
 870	if (!patch->objs)
 871		return -EINVAL;
 872
 873	INIT_LIST_HEAD(&patch->list);
 874	INIT_LIST_HEAD(&patch->obj_list);
 875	kobject_init(&patch->kobj, &klp_ktype_patch);
 876	patch->enabled = false;
 877	patch->forced = false;
 878	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
 879	init_completion(&patch->finish);
 880
 881	klp_for_each_object_static(patch, obj) {
 882		if (!obj->funcs)
 883			return -EINVAL;
 884
 885		klp_init_object_early(patch, obj);
 886
 887		klp_for_each_func_static(obj, func) {
 888			klp_init_func_early(obj, func);
 889		}
 890	}
 891
 892	if (!try_module_get(patch->mod))
 893		return -ENODEV;
 894
 895	return 0;
 896}
 897
 898static int klp_init_patch(struct klp_patch *patch)
 899{
 900	struct klp_object *obj;
 901	int ret;
 902
 903	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
 904	if (ret)
 905		return ret;
 906
 907	if (patch->replace) {
 908		ret = klp_add_nops(patch);
 909		if (ret)
 910			return ret;
 911	}
 912
 913	klp_for_each_object(patch, obj) {
 914		ret = klp_init_object(patch, obj);
 915		if (ret)
 916			return ret;
 917	}
 918
 919	list_add_tail(&patch->list, &klp_patches);
 920
 921	return 0;
 922}
 923
 924static int __klp_disable_patch(struct klp_patch *patch)
 925{
 926	struct klp_object *obj;
 927
 928	if (WARN_ON(!patch->enabled))
 929		return -EINVAL;
 930
 931	if (klp_transition_patch)
 932		return -EBUSY;
 933
 934	klp_init_transition(patch, KLP_UNPATCHED);
 935
 936	klp_for_each_object(patch, obj)
 937		if (obj->patched)
 938			klp_pre_unpatch_callback(obj);
 939
 940	/*
 941	 * Enforce the order of the func->transition writes in
 942	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
 943	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
 944	 * is called shortly after klp_update_patch_state() switches the task,
 945	 * this ensures the handler sees that func->transition is set.
 946	 */
 947	smp_wmb();
 948
 949	klp_start_transition();
 950	patch->enabled = false;
 951	klp_try_complete_transition();
 952
 953	return 0;
 954}
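
/*
 * __klp_disable_patch() is normally reached via the sysfs "enabled"
 * attribute; from user space, the unpatching transition is typically
 * started with (illustrative patch name):
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */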
 955
 956static int __klp_enable_patch(struct klp_patch *patch)
 957{
 958	struct klp_object *obj;
 959	int ret;
 960
 961	if (klp_transition_patch)
 962		return -EBUSY;
 963
 964	if (WARN_ON(patch->enabled))
 965		return -EINVAL;
 966
 967	pr_notice("enabling patch '%s'\n", patch->mod->name);
 968
 969	klp_init_transition(patch, KLP_PATCHED);
 970
 971	/*
 972	 * Enforce the order of the func->transition writes in
 973	 * klp_init_transition() and the ops->func_stack writes in
 974	 * klp_patch_object(), so that klp_ftrace_handler() will see the
 975	 * func->transition updates before the handler is registered and the
 976	 * new funcs become visible to the handler.
 977	 */
 978	smp_wmb();
 979
 980	klp_for_each_object(patch, obj) {
 981		if (!klp_is_object_loaded(obj))
 982			continue;
 983
 984		ret = klp_pre_patch_callback(obj);
 985		if (ret) {
 986			pr_warn("pre-patch callback failed for object '%s'\n",
 987				klp_is_module(obj) ? obj->name : "vmlinux");
 988			goto err;
 989		}
 990
 991		ret = klp_patch_object(obj);
 992		if (ret) {
 993			pr_warn("failed to patch object '%s'\n",
 994				klp_is_module(obj) ? obj->name : "vmlinux");
 995			goto err;
 996		}
 997	}
 998
 999	klp_start_transition();
1000	patch->enabled = true;
1001	klp_try_complete_transition();
1002
1003	return 0;
1004err:
1005	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1006
1007	klp_cancel_transition();
1008	return ret;
1009}
1010
1011/**
1012 * klp_enable_patch() - enable the livepatch
1013 * @patch:	patch to be enabled
1014 *
1015 * Initializes the data structure associated with the patch, creates the sysfs
1016 * interface, performs the needed symbol lookups and code relocations,
1017 * and registers the patched functions with ftrace.
1018 *
1019 * This function is supposed to be called from the livepatch module_init()
1020 * callback.
1021 *
1022 * Return: 0 on success, otherwise error
1023 */
1024int klp_enable_patch(struct klp_patch *patch)
1025{
1026	int ret;
1027
1028	if (!patch || !patch->mod)
1029		return -EINVAL;
1030
1031	if (!is_livepatch_module(patch->mod)) {
1032		pr_err("module %s is not marked as a livepatch module\n",
1033		       patch->mod->name);
1034		return -EINVAL;
1035	}
1036
1037	if (!klp_initialized())
1038		return -ENODEV;
1039
1040	if (!klp_have_reliable_stack()) {
1041		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
1042		pr_warn("The livepatch transition may never complete.\n");
1043	}
1044
1045	mutex_lock(&klp_mutex);
1046
1047	if (!klp_is_patch_compatible(patch)) {
1048		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
1049			patch->mod->name);
1050		mutex_unlock(&klp_mutex);
1051		return -EINVAL;
1052	}
1053
1054	ret = klp_init_patch_early(patch);
1055	if (ret) {
1056		mutex_unlock(&klp_mutex);
1057		return ret;
1058	}
1059
1060	ret = klp_init_patch(patch);
1061	if (ret)
1062		goto err;
1063
1064	ret = __klp_enable_patch(patch);
1065	if (ret)
1066		goto err;
1067
1068	mutex_unlock(&klp_mutex);
1069
1070	return 0;
1071
1072err:
1073	klp_free_patch_start(patch);
1074
1075	mutex_unlock(&klp_mutex);
1076
1077	klp_free_patch_finish(patch);
1078
1079	return ret;
1080}
1081EXPORT_SYMBOL_GPL(klp_enable_patch);
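
/*
 * A minimal caller sketch in the spirit of samples/livepatch/livepatch-sample.c,
 * building on the illustrative funcs[]/objs[] arrays shown earlier:
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 *
 * The MODULE_INFO(livepatch, "Y") tag is what makes is_livepatch_module()
 * return true for the patch module.
 */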
1082
1083/*
1084 * This function unpatches objects from the replaced livepatches.
1085 *
1086 * We could be pretty aggressive here. It is called in a situation where
1087 * these structures are no longer accessed from the ftrace handler:
1088 * all functions are already redirected by the klp_transition_patch. They
1089 * either use the new code or run the original code because of the
1090 * special nop function patches.
1091 *
1092 * The only exception is when the transition was forced. In this case,
1093 * klp_ftrace_handler() might still see the replaced patch on the stack.
1094 * Fortunately, it is carefully designed to work with removed functions
1095 * thanks to RCU. We only have to keep the replaced patches on the
1096 * system, which is handled transparently by patch->module_put.
1097 */
1098void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
1099{
1100	struct klp_patch *old_patch;
1101
1102	klp_for_each_patch(old_patch) {
1103		if (old_patch == new_patch)
1104			return;
1105
1106		old_patch->enabled = false;
1107		klp_unpatch_objects(old_patch);
1108	}
1109}
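
/*
 * A cumulative patch opts into this replacement behavior by setting the
 * replace flag, building on the earlier sketch:
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *		.replace = true,
 *	};
 *
 * Once the transition to such a patch completes, all previously installed
 * livepatches are disabled and their modules can be unloaded.
 */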
1110
1111/*
1112 * This function removes the dynamically allocated 'nop' functions.
1113 *
1114 * We could be pretty aggressive here. NOPs do not change the existing
1115 * behavior except for adding an unnecessary delay in the ftrace handler.
1116 *
1117 * It is safe even when the transition was forced. The ftrace handler
1118 * will see a valid ops->func_stack entry thanks to RCU.
1119 *
1120 * We could even free the NOP structures. They must be the last entries
1121 * in ops->func_stack, in which case unregister_ftrace_function() is called.
1122 * It does the same as klp_synchronize_transition() to make sure that
1123 * nobody is inside the ftrace handler once the operation finishes.
1124 *
1125 * IMPORTANT: It must be called right after removing the replaced patches!
1126 */
1127void klp_discard_nops(struct klp_patch *new_patch)
1128{
1129	klp_unpatch_objects_dynamic(klp_transition_patch);
1130	klp_free_objects_dynamic(klp_transition_patch);
1131}
1132
1133/*
1134 * Remove parts of patches that touch a given kernel module. Patches are
1135 * processed up to, but not including, @limit; when @limit is NULL, all
1136 * patches are handled.
1137 */
1138static void klp_cleanup_module_patches_limited(struct module *mod,
1139					       struct klp_patch *limit)
1140{
1141	struct klp_patch *patch;
1142	struct klp_object *obj;
1143
1144	klp_for_each_patch(patch) {
1145		if (patch == limit)
1146			break;
1147
1148		klp_for_each_object(patch, obj) {
1149			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1150				continue;
1151
1152			if (patch != klp_transition_patch)
1153				klp_pre_unpatch_callback(obj);
1154
1155			pr_notice("reverting patch '%s' on unloading module '%s'\n",
1156				  patch->mod->name, obj->mod->name);
1157			klp_unpatch_object(obj);
1158
1159			klp_post_unpatch_callback(obj);
1160
1161			klp_free_object_loaded(obj);
1162			break;
1163		}
1164	}
1165}
1166
1167int klp_module_coming(struct module *mod)
1168{
1169	int ret;
1170	struct klp_patch *patch;
1171	struct klp_object *obj;
1172
1173	if (WARN_ON(mod->state != MODULE_STATE_COMING))
1174		return -EINVAL;
1175
1176	if (!strcmp(mod->name, "vmlinux")) {
1177		pr_err("vmlinux.ko: invalid module name\n");
1178		return -EINVAL;
1179	}
1180
1181	mutex_lock(&klp_mutex);
1182	/*
1183	 * Each module has to know that klp_module_coming()
1184	 * has been called, because we never know which module
1185	 * a future patch will target.
1186	 */
1187	mod->klp_alive = true;
1188
1189	klp_for_each_patch(patch) {
1190		klp_for_each_object(patch, obj) {
1191			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1192				continue;
1193
1194			obj->mod = mod;
1195
1196			ret = klp_init_object_loaded(patch, obj);
1197			if (ret) {
1198				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1199					patch->mod->name, obj->mod->name, ret);
1200				goto err;
1201			}
1202
1203			pr_notice("applying patch '%s' to loading module '%s'\n",
1204				  patch->mod->name, obj->mod->name);
1205
1206			ret = klp_pre_patch_callback(obj);
1207			if (ret) {
1208				pr_warn("pre-patch callback failed for object '%s'\n",
1209					obj->name);
1210				goto err;
1211			}
1212
1213			ret = klp_patch_object(obj);
1214			if (ret) {
1215				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1216					patch->mod->name, obj->mod->name, ret);
1217
1218				klp_post_unpatch_callback(obj);
1219				goto err;
1220			}
1221
1222			if (patch != klp_transition_patch)
1223				klp_post_patch_callback(obj);
1224
1225			break;
1226		}
1227	}
1228
1229	mutex_unlock(&klp_mutex);
1230
1231	return 0;
1232
1233err:
1234	/*
1235	 * If a patch could not be applied, return the
1236	 * error to the module loader.
1237	 */
1238	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1239		patch->mod->name, obj->mod->name, obj->mod->name);
1240	mod->klp_alive = false;
1241	obj->mod = NULL;
1242	klp_cleanup_module_patches_limited(mod, patch);
1243	mutex_unlock(&klp_mutex);
1244
1245	return ret;
1246}
1247
1248void klp_module_going(struct module *mod)
1249{
1250	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1251		    mod->state != MODULE_STATE_COMING))
1252		return;
1253
1254	mutex_lock(&klp_mutex);
1255	/*
1256	 * Each module has to know that klp_module_going()
1257	 * has been called, because we never know which module
1258	 * a future patch will target.
1259	 */
1260	mod->klp_alive = false;
1261
1262	klp_cleanup_module_patches_limited(mod, NULL);
1263
1264	mutex_unlock(&klp_mutex);
1265}
1266
1267static int __init klp_init(void)
1268{
1269	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1270	if (!klp_root_kobj)
1271		return -ENOMEM;
1272
1273	return 0;
1274}
1275
1276module_init(klp_init);