v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * core.c - Kernel Live Patching Core
   4 *
   5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   6 * Copyright (C) 2014 SUSE
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/mutex.h>
  14#include <linux/slab.h>
  15#include <linux/list.h>
  16#include <linux/kallsyms.h>
  17#include <linux/livepatch.h>
  18#include <linux/elf.h>
  19#include <linux/moduleloader.h>
  20#include <linux/completion.h>
  21#include <linux/memory.h>
  22#include <linux/rcupdate.h>
  23#include <asm/cacheflush.h>
  24#include "core.h"
  25#include "patch.h"
  26#include "state.h"
  27#include "transition.h"
  28
  29/*
  30 * klp_mutex is a coarse lock which serializes access to klp data.  All
  31 * accesses to klp-related variables and structures must have mutex protection,
  32 * except within the following functions which carefully avoid the need for it:
  33 *
  34 * - klp_ftrace_handler()
  35 * - klp_update_patch_state()
  36 */
  37DEFINE_MUTEX(klp_mutex);
  38
  39/*
  40 * Actively used patches: enabled or in transition. Note that replaced
  41 * or disabled patches are not listed even though the related kernel
  42 * module can still be loaded.
  43 */
  44LIST_HEAD(klp_patches);
  45
  46static struct kobject *klp_root_kobj;
  47
  48static bool klp_is_module(struct klp_object *obj)
  49{
  50	return obj->name;
  51}
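/*
 * Editor's note: by convention obj->name is NULL for the vmlinux object, so
 * a non-NULL name identifies a (possibly not yet loaded) kernel module.
 */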
  52
  53/* sets obj->mod if object is not vmlinux and module is found */
  54static void klp_find_object_module(struct klp_object *obj)
  55{
  56	struct module *mod;
  57
  58	if (!klp_is_module(obj))
  59		return;
  60
  61	rcu_read_lock_sched();
  62	/*
  63	 * We do not want to block removal of patched modules and therefore
  64	 * we do not take a reference here. The patches are removed by
  65	 * klp_module_going() instead.
  66	 */
  67	mod = find_module(obj->name);
  68	/*
  69	 * Do not interfere with the work of klp_module_coming() and klp_module_going().
  70	 * Note that the patch might still be needed before klp_module_going()
  71	 * is called. Module functions can be called even in the GOING state
  72	 * until mod->exit() finishes. This is especially important for
  73	 * patches that modify the semantics of the functions.
  74	 */
  75	if (mod && mod->klp_alive)
  76		obj->mod = mod;
  77
  78	rcu_read_unlock_sched();
  79}
  80
  81static bool klp_initialized(void)
  82{
  83	return !!klp_root_kobj;
  84}
  85
  86static struct klp_func *klp_find_func(struct klp_object *obj,
  87				      struct klp_func *old_func)
  88{
  89	struct klp_func *func;
  90
  91	klp_for_each_func(obj, func) {
  92		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
  93		    (old_func->old_sympos == func->old_sympos)) {
  94			return func;
  95		}
  96	}
  97
  98	return NULL;
  99}
 100
 101static struct klp_object *klp_find_object(struct klp_patch *patch,
 102					  struct klp_object *old_obj)
 103{
 104	struct klp_object *obj;
 105
 106	klp_for_each_object(patch, obj) {
 107		if (klp_is_module(old_obj)) {
 108			if (klp_is_module(obj) &&
 109			    strcmp(old_obj->name, obj->name) == 0) {
 110				return obj;
 111			}
 112		} else if (!klp_is_module(obj)) {
 113			return obj;
 114		}
 115	}
 116
 117	return NULL;
 118}
 119
 120struct klp_find_arg {
 121	const char *objname;
 122	const char *name;
 123	unsigned long addr;
 124	unsigned long count;
 125	unsigned long pos;
 126};
 127
 128static int klp_match_callback(void *data, unsigned long addr)
 129{
 130	struct klp_find_arg *args = data;
 131
 132	args->addr = addr;
 133	args->count++;
 134
 135	/*
 136	 * Finish the search when the symbol is found for the desired position
 137	 * or the position is not defined for a non-unique symbol.
 138	 */
 139	if ((args->pos && (args->count == args->pos)) ||
 140	    (!args->pos && (args->count > 1)))
 141		return 1;
 142
 143	return 0;
 144}
 145
 146static int klp_find_callback(void *data, const char *name,
 147			     struct module *mod, unsigned long addr)
 148{
 149	struct klp_find_arg *args = data;
 150
 151	if ((mod && !args->objname) || (!mod && args->objname))
 152		return 0;
 153
 154	if (strcmp(args->name, name))
 155		return 0;
 156
 157	if (args->objname && strcmp(args->objname, mod->name))
 158		return 0;
 159
 160	return klp_match_callback(data, addr);
 161}
 162
 163static int klp_find_object_symbol(const char *objname, const char *name,
 164				  unsigned long sympos, unsigned long *addr)
 165{
 166	struct klp_find_arg args = {
 167		.objname = objname,
 168		.name = name,
 169		.addr = 0,
 170		.count = 0,
 171		.pos = sympos,
 172	};
 173
 174	if (objname)
 175		module_kallsyms_on_each_symbol(klp_find_callback, &args);
 176	else
 177		kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
 178
 179	/*
 180	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
 181	 * otherwise ensure the symbol position count matches sympos.
 182	 */
 183	if (args.addr == 0)
 184		pr_err("symbol '%s' not found in symbol table\n", name);
 185	else if (args.count > 1 && sympos == 0) {
 186		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
 187		       name, objname);
 188	} else if (sympos != args.count && sympos > 0) {
 189		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
 190		       sympos, name, objname ? objname : "vmlinux");
 191	} else {
 192		*addr = args.addr;
 193		return 0;
 194	}
 195
 196	*addr = 0;
 197	return -EINVAL;
 198}
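/*
 * Editor's note: an illustrative sketch of how sympos disambiguates
 * duplicate symbols; the names below are hypothetical. If vmlinux contained
 * two static functions named "do_work":
 *
 *	klp_find_object_symbol(NULL, "do_work", 0, &addr); // -EINVAL, ambiguous
 *	klp_find_object_symbol(NULL, "do_work", 2, &addr); // second occurrence
 *	klp_find_object_symbol(NULL, "do_init", 0, &addr); // OK if unique
 */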
 199
 200static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
 201			       unsigned int symndx, Elf_Shdr *relasec,
 202			       const char *sec_objname)
 203{
 204	int i, cnt, ret;
 205	char sym_objname[MODULE_NAME_LEN];
 206	char sym_name[KSYM_NAME_LEN];
 207	Elf_Rela *relas;
 208	Elf_Sym *sym;
 209	unsigned long sympos, addr;
 210	bool sym_vmlinux;
 211	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
 212
 213	/*
 214	 * Since the field widths for sym_objname and sym_name in the sscanf()
 215	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 216	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 217	 * and KSYM_NAME_LEN have the values we expect them to have.
 218	 *
 219	 * Because the value of MODULE_NAME_LEN can differ among architectures,
 220	 * we use the smallest/strictest upper bound possible (56, based on
 221	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 222	 */
 223	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
 224
 225	relas = (Elf_Rela *) relasec->sh_addr;
 226	/* For each rela in this klp relocation section */
 227	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 228		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
 229		if (sym->st_shndx != SHN_LIVEPATCH) {
 230			pr_err("symbol %s is not marked as a livepatch symbol\n",
 231			       strtab + sym->st_name);
 232			return -EINVAL;
 233		}
 234
 235		/* Format: .klp.sym.sym_objname.sym_name,sympos */
 236		cnt = sscanf(strtab + sym->st_name,
 237			     ".klp.sym.%55[^.].%511[^,],%lu",
 238			     sym_objname, sym_name, &sympos);
 239		if (cnt != 3) {
 240			pr_err("symbol %s has an incorrectly formatted name\n",
 241			       strtab + sym->st_name);
 242			return -EINVAL;
 243		}
 244
 245		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
 246
 247		/*
 248		 * Prevent module-specific KLP rela sections from referencing
 249		 * vmlinux symbols.  This helps prevent ordering issues with
 250		 * module special section initializations.  Presumably such
 251		 * symbols are exported and normal relas can be used instead.
 252		 */
 253		if (!sec_vmlinux && sym_vmlinux) {
 254			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
 255			       sym_name);
 256			return -EINVAL;
 257		}
 258
 259		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
 260		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
 261					     sym_name, sympos, &addr);
 262		if (ret)
 263			return ret;
 264
 265		sym->st_value = addr;
 266	}
 267
 268	return 0;
 269}
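/*
 * Editor's note: illustrative symbol names in the format parsed above,
 * where "foo" stands for a hypothetical module:
 *
 *	.klp.sym.vmlinux.do_work,1	-> sym_objname "vmlinux", sympos 1
 *	.klp.sym.foo.foo_handler,0	-> sym_objname "foo", unique symbol
 */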
 270
 271/*
 272 * At a high-level, there are two types of klp relocation sections: those which
 273 * reference symbols which live in vmlinux; and those which reference symbols
 274 * which live in other modules.  This function is called for both types:
 275 *
 276 * 1) When a klp module itself loads, the module code calls this function to
 277 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 278 *    These relocations are written to the klp module text to allow the patched
 279 *    code/data to reference unexported vmlinux symbols.  They're written as
 280 *    early as possible to ensure that other module init code (e.g.,
 281 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 282 *    might be referenced by the klp module's special sections.
 283 *
 284 * 2) When a to-be-patched module loads -- or is already loaded when a
 285 *    corresponding klp module loads -- klp code calls this function to write
 286 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 287 *    are written to the klp module text to allow the patched code/data to
 288 *    reference symbols which live in the to-be-patched module or one of its
 289 *    module dependencies.  Exported symbols are supported, in addition to
 290 *    unexported symbols, in order to enable late module patching, which allows
 291 *    the to-be-patched module to be loaded and patched sometime *after* the
 292 *    klp module is loaded.
 293 */
 294int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
 295			     const char *shstrtab, const char *strtab,
 296			     unsigned int symndx, unsigned int secndx,
 297			     const char *objname)
 298{
 299	int cnt, ret;
 300	char sec_objname[MODULE_NAME_LEN];
 301	Elf_Shdr *sec = sechdrs + secndx;
 302
 303	/*
 304	 * Format: .klp.rela.sec_objname.section_name
 305	 * See comment in klp_resolve_symbols() for an explanation
 306	 * of the selected field width value.
 307	 */
 308	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
 309		     sec_objname);
 310	if (cnt != 1) {
 311		pr_err("section %s has an incorrectly formatted name\n",
 312		       shstrtab + sec->sh_name);
 313		return -EINVAL;
 314	}
 315
 316	if (strcmp(objname ? objname : "vmlinux", sec_objname))
 317		return 0;
 318
 319	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
 320	if (ret)
 321		return ret;
 322
 323	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
 324}
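/*
 * Editor's note: illustrative section names for the two cases described
 * above, again with a hypothetical module "foo":
 *
 *	.klp.rela.vmlinux.text.do_work	(case 1, written when the klp module loads)
 *	.klp.rela.foo.text.foo_handler	(case 2, written once "foo" is loaded)
 */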
 325
 326/*
 327 * Sysfs Interface
 328 *
 329 * /sys/kernel/livepatch
 330 * /sys/kernel/livepatch/<patch>
 331 * /sys/kernel/livepatch/<patch>/enabled
 332 * /sys/kernel/livepatch/<patch>/transition
 333 * /sys/kernel/livepatch/<patch>/force
 334 * /sys/kernel/livepatch/<patch>/<object>
 335 * /sys/kernel/livepatch/<patch>/<object>/patched
 336 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 337 */
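/*
 * Editor's note: typical use of this interface from a shell (illustrative;
 * "livepatch_sample" stands for a loaded patch module):
 *
 *	cat /sys/kernel/livepatch/livepatch_sample/enabled	# 1 when enabled
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled	# disable
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/force	# force a stuck
 *								# transition
 */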
 338static int __klp_disable_patch(struct klp_patch *patch);
 339
 340static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 341			     const char *buf, size_t count)
 342{
 343	struct klp_patch *patch;
 344	int ret;
 345	bool enabled;
 346
 347	ret = kstrtobool(buf, &enabled);
 348	if (ret)
 349		return ret;
 350
 351	patch = container_of(kobj, struct klp_patch, kobj);
 352
 353	mutex_lock(&klp_mutex);
 354
 355	if (patch->enabled == enabled) {
 356		/* already in requested state */
 357		ret = -EINVAL;
 358		goto out;
 359	}
 360
 361	/*
 362	 * Allow reversing a pending transition in either direction. It might be
 363	 * necessary so that the transition can complete without forcing it and
 364	 * breaking system integrity.
 365	 *
 366	 * Do not allow re-enabling a disabled patch.
 367	 */
 368	if (patch == klp_transition_patch)
 369		klp_reverse_transition();
 370	else if (!enabled)
 371		ret = __klp_disable_patch(patch);
 372	else
 373		ret = -EINVAL;
 374
 375out:
 376	mutex_unlock(&klp_mutex);
 377
 378	if (ret)
 379		return ret;
 380	return count;
 381}
 382
 383static ssize_t enabled_show(struct kobject *kobj,
 384			    struct kobj_attribute *attr, char *buf)
 385{
 386	struct klp_patch *patch;
 387
 388	patch = container_of(kobj, struct klp_patch, kobj);
 389	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
 390}
 391
 392static ssize_t transition_show(struct kobject *kobj,
 393			       struct kobj_attribute *attr, char *buf)
 394{
 395	struct klp_patch *patch;
 396
 397	patch = container_of(kobj, struct klp_patch, kobj);
 398	return snprintf(buf, PAGE_SIZE-1, "%d\n",
 399			patch == klp_transition_patch);
 400}
 401
 402static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 403			   const char *buf, size_t count)
 404{
 405	struct klp_patch *patch;
 406	int ret;
 407	bool val;
 408
 409	ret = kstrtobool(buf, &val);
 410	if (ret)
 411		return ret;
 412
 413	if (!val)
 414		return count;
 415
 416	mutex_lock(&klp_mutex);
 417
 418	patch = container_of(kobj, struct klp_patch, kobj);
 419	if (patch != klp_transition_patch) {
 420		mutex_unlock(&klp_mutex);
 421		return -EINVAL;
 422	}
 423
 424	klp_force_transition();
 425
 426	mutex_unlock(&klp_mutex);
 427
 428	return count;
 429}
 430
 431static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 432static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 433static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 434static struct attribute *klp_patch_attrs[] = {
 435	&enabled_kobj_attr.attr,
 436	&transition_kobj_attr.attr,
 437	&force_kobj_attr.attr,
 438	NULL
 439};
 440ATTRIBUTE_GROUPS(klp_patch);
 441
 442static ssize_t patched_show(struct kobject *kobj,
 443			    struct kobj_attribute *attr, char *buf)
 444{
 445	struct klp_object *obj;
 446
 447	obj = container_of(kobj, struct klp_object, kobj);
 448	return sysfs_emit(buf, "%d\n", obj->patched);
 449}
 450
 451static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
 452static struct attribute *klp_object_attrs[] = {
 453	&patched_kobj_attr.attr,
 454	NULL,
 455};
 456ATTRIBUTE_GROUPS(klp_object);
 457
 458static void klp_free_object_dynamic(struct klp_object *obj)
 459{
 460	kfree(obj->name);
 461	kfree(obj);
 462}
 463
 464static void klp_init_func_early(struct klp_object *obj,
 465				struct klp_func *func);
 466static void klp_init_object_early(struct klp_patch *patch,
 467				  struct klp_object *obj);
 468
 469static struct klp_object *klp_alloc_object_dynamic(const char *name,
 470						   struct klp_patch *patch)
 471{
 472	struct klp_object *obj;
 473
 474	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 475	if (!obj)
 476		return NULL;
 477
 478	if (name) {
 479		obj->name = kstrdup(name, GFP_KERNEL);
 480		if (!obj->name) {
 481			kfree(obj);
 482			return NULL;
 483		}
 484	}
 485
 486	klp_init_object_early(patch, obj);
 487	obj->dynamic = true;
 488
 489	return obj;
 490}
 491
 492static void klp_free_func_nop(struct klp_func *func)
 493{
 494	kfree(func->old_name);
 495	kfree(func);
 496}
 497
 498static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
 499					   struct klp_object *obj)
 500{
 501	struct klp_func *func;
 502
 503	func = kzalloc(sizeof(*func), GFP_KERNEL);
 504	if (!func)
 505		return NULL;
 506
 507	if (old_func->old_name) {
 508		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
 509		if (!func->old_name) {
 510			kfree(func);
 511			return NULL;
 512		}
 513	}
 514
 515	klp_init_func_early(obj, func);
 516	/*
 517	 * func->new_func is same as func->old_func. These addresses are
 518	 * set when the object is loaded, see klp_init_object_loaded().
 519	 */
 520	func->old_sympos = old_func->old_sympos;
 521	func->nop = true;
 522
 523	return func;
 524}
 525
 526static int klp_add_object_nops(struct klp_patch *patch,
 527			       struct klp_object *old_obj)
 528{
 529	struct klp_object *obj;
 530	struct klp_func *func, *old_func;
 531
 532	obj = klp_find_object(patch, old_obj);
 533
 534	if (!obj) {
 535		obj = klp_alloc_object_dynamic(old_obj->name, patch);
 536		if (!obj)
 537			return -ENOMEM;
 538	}
 539
 540	klp_for_each_func(old_obj, old_func) {
 541		func = klp_find_func(obj, old_func);
 542		if (func)
 543			continue;
 544
 545		func = klp_alloc_func_nop(old_func, obj);
 546		if (!func)
 547			return -ENOMEM;
 548	}
 549
 550	return 0;
 551}
 552
 553/*
 554 * Add 'nop' functions which simply return to the caller to run
 555 * the original function. The 'nop' functions are added to a
 556 * patch to facilitate a 'replace' mode.
 557 */
 558static int klp_add_nops(struct klp_patch *patch)
 559{
 560	struct klp_patch *old_patch;
 561	struct klp_object *old_obj;
 562
 563	klp_for_each_patch(old_patch) {
 564		klp_for_each_object(old_patch, old_obj) {
 565			int err;
 566
 567			err = klp_add_object_nops(patch, old_obj);
 568			if (err)
 569				return err;
 570		}
 571	}
 572
 573	return 0;
 574}
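/*
 * Editor's note: illustrative scenario. If an installed patch P1 patches
 * func_a() and a new cumulative patch P2 (patch->replace set) patches only
 * func_b(), klp_add_nops() adds a dynamic nop entry for func_a() to P2, so
 * that the transition to P2 atomically reverts func_a() to its original code.
 */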
 575
 576static void klp_kobj_release_patch(struct kobject *kobj)
 577{
 578	struct klp_patch *patch;
 579
 580	patch = container_of(kobj, struct klp_patch, kobj);
 581	complete(&patch->finish);
 582}
 583
 584static struct kobj_type klp_ktype_patch = {
 585	.release = klp_kobj_release_patch,
 586	.sysfs_ops = &kobj_sysfs_ops,
 587	.default_groups = klp_patch_groups,
 588};
 589
 590static void klp_kobj_release_object(struct kobject *kobj)
 591{
 592	struct klp_object *obj;
 593
 594	obj = container_of(kobj, struct klp_object, kobj);
 595
 596	if (obj->dynamic)
 597		klp_free_object_dynamic(obj);
 598}
 599
 600static struct kobj_type klp_ktype_object = {
 601	.release = klp_kobj_release_object,
 602	.sysfs_ops = &kobj_sysfs_ops,
 603	.default_groups = klp_object_groups,
 604};
 605
 606static void klp_kobj_release_func(struct kobject *kobj)
 607{
 608	struct klp_func *func;
 609
 610	func = container_of(kobj, struct klp_func, kobj);
 611
 612	if (func->nop)
 613		klp_free_func_nop(func);
 614}
 615
 616static struct kobj_type klp_ktype_func = {
 617	.release = klp_kobj_release_func,
 618	.sysfs_ops = &kobj_sysfs_ops,
 619};
 620
 621static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
 622{
 623	struct klp_func *func, *tmp_func;
 624
 625	klp_for_each_func_safe(obj, func, tmp_func) {
 626		if (nops_only && !func->nop)
 627			continue;
 628
 629		list_del(&func->node);
 630		kobject_put(&func->kobj);
 631	}
 632}
 633
 634/* Clean up when a patched object is unloaded */
 635static void klp_free_object_loaded(struct klp_object *obj)
 636{
 637	struct klp_func *func;
 638
 639	obj->mod = NULL;
 640
 641	klp_for_each_func(obj, func) {
 642		func->old_func = NULL;
 643
 644		if (func->nop)
 645			func->new_func = NULL;
 646	}
 647}
 648
 649static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
 650{
 651	struct klp_object *obj, *tmp_obj;
 652
 653	klp_for_each_object_safe(patch, obj, tmp_obj) {
 654		__klp_free_funcs(obj, nops_only);
 655
 656		if (nops_only && !obj->dynamic)
 657			continue;
 658
 659		list_del(&obj->node);
 660		kobject_put(&obj->kobj);
 661	}
 662}
 663
 664static void klp_free_objects(struct klp_patch *patch)
 665{
 666	__klp_free_objects(patch, false);
 667}
 668
 669static void klp_free_objects_dynamic(struct klp_patch *patch)
 670{
 671	__klp_free_objects(patch, true);
 672}
 673
 674/*
 675 * This function implements the free operations that can be called safely
 676 * under klp_mutex.
 677 *
 678 * The operation must be completed by calling klp_free_patch_finish()
 679 * outside klp_mutex.
 680 */
 681static void klp_free_patch_start(struct klp_patch *patch)
 682{
 683	if (!list_empty(&patch->list))
 684		list_del(&patch->list);
 685
 686	klp_free_objects(patch);
 687}
 688
 689/*
 690 * This function implements the free part that must be called outside
 691 * klp_mutex.
 692 *
 693 * It must be called after klp_free_patch_start(). And it has to be
 694 * the last function accessing the livepatch structures when the patch
 695 * gets disabled.
 696 */
 697static void klp_free_patch_finish(struct klp_patch *patch)
 698{
 699	/*
 700	 * Avoid deadlock with enabled_store() sysfs callback by
 701	 * calling this outside klp_mutex. It is safe because
 702	 * this is called when the patch gets disabled and it
 703	 * cannot get enabled again.
 704	 */
 705	kobject_put(&patch->kobj);
 706	wait_for_completion(&patch->finish);
 707
 708	/* Put the module after the last access to struct klp_patch. */
 709	if (!patch->forced)
 710		module_put(patch->mod);
 711}
 712
 713/*
 714 * The livepatch might be freed from the sysfs interface created by the patch.
 715 * This work item makes it possible to wait until the interface is destroyed
 716 * in a separate context.
 717 */
 718static void klp_free_patch_work_fn(struct work_struct *work)
 719{
 720	struct klp_patch *patch =
 721		container_of(work, struct klp_patch, free_work);
 722
 723	klp_free_patch_finish(patch);
 724}
 725
 726void klp_free_patch_async(struct klp_patch *patch)
 727{
 728	klp_free_patch_start(patch);
 729	schedule_work(&patch->free_work);
 730}
 731
 732void klp_free_replaced_patches_async(struct klp_patch *new_patch)
 733{
 734	struct klp_patch *old_patch, *tmp_patch;
 735
 736	klp_for_each_patch_safe(old_patch, tmp_patch) {
 737		if (old_patch == new_patch)
 738			return;
 739		klp_free_patch_async(old_patch);
 740	}
 741}
 742
 743static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 744{
 745	if (!func->old_name)
 746		return -EINVAL;
 747
 748	/*
 749	 * NOPs get the address later. The patched module must be loaded,
 750	 * see klp_init_object_loaded().
 751	 */
 752	if (!func->new_func && !func->nop)
 753		return -EINVAL;
 754
 755	if (strlen(func->old_name) >= KSYM_NAME_LEN)
 756		return -EINVAL;
 757
 758	INIT_LIST_HEAD(&func->stack_node);
 759	func->patched = false;
 760	func->transition = false;
 761
 762	/* The format for the sysfs directory is <function,sympos> where sympos
 763	 * is the nth occurrence of this symbol in kallsyms for the patched
 764	 * object. If the user selects 0 for old_sympos, then 1 will be used
 765	 * since a unique symbol will be the first occurrence.
 766	 */
 767	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
 768			   func->old_name,
 769			   func->old_sympos ? func->old_sympos : 1);
 770}
 771
 772static int klp_apply_object_relocs(struct klp_patch *patch,
 773				   struct klp_object *obj)
 774{
 775	int i, ret;
 776	struct klp_modinfo *info = patch->mod->klp_info;
 777
 778	for (i = 1; i < info->hdr.e_shnum; i++) {
 779		Elf_Shdr *sec = info->sechdrs + i;
 780
 781		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
 782			continue;
 783
 784		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
 785					       info->secstrings,
 786					       patch->mod->core_kallsyms.strtab,
 787					       info->symndx, i, obj->name);
 788		if (ret)
 789			return ret;
 790	}
 791
 792	return 0;
 793}
 794
 795/* parts of the initialization that are done only when the object is loaded */
 796static int klp_init_object_loaded(struct klp_patch *patch,
 797				  struct klp_object *obj)
 798{
 799	struct klp_func *func;
 800	int ret;
 801
 802	if (klp_is_module(obj)) {
 803		/*
 804		 * Only write module-specific relocations here
 805		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
 806		 * written earlier during the initialization of the klp module
 807		 * itself.
 808		 */
 809		ret = klp_apply_object_relocs(patch, obj);
 810		if (ret)
 811			return ret;
 812	}
 813
 814	klp_for_each_func(obj, func) {
 815		ret = klp_find_object_symbol(obj->name, func->old_name,
 816					     func->old_sympos,
 817					     (unsigned long *)&func->old_func);
 818		if (ret)
 819			return ret;
 820
 821		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
 822						  &func->old_size, NULL);
 823		if (!ret) {
 824			pr_err("kallsyms size lookup failed for '%s'\n",
 825			       func->old_name);
 826			return -ENOENT;
 827		}
 828
 829		if (func->nop)
 830			func->new_func = func->old_func;
 831
 832		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
 833						  &func->new_size, NULL);
 834		if (!ret) {
 835			pr_err("kallsyms size lookup failed for '%s' replacement\n",
 836			       func->old_name);
 837			return -ENOENT;
 838		}
 839	}
 840
 841	return 0;
 842}
 843
 844static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 845{
 846	struct klp_func *func;
 847	int ret;
 848	const char *name;
 849
 850	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
 851		return -EINVAL;
 852
 853	obj->patched = false;
 854	obj->mod = NULL;
 855
 856	klp_find_object_module(obj);
 857
 858	name = klp_is_module(obj) ? obj->name : "vmlinux";
 859	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
 860	if (ret)
 861		return ret;
 862
 863	klp_for_each_func(obj, func) {
 864		ret = klp_init_func(obj, func);
 865		if (ret)
 866			return ret;
 867	}
 868
 869	if (klp_is_object_loaded(obj))
 870		ret = klp_init_object_loaded(patch, obj);
 871
 872	return ret;
 873}
 874
 875static void klp_init_func_early(struct klp_object *obj,
 876				struct klp_func *func)
 877{
 878	kobject_init(&func->kobj, &klp_ktype_func);
 879	list_add_tail(&func->node, &obj->func_list);
 880}
 881
 882static void klp_init_object_early(struct klp_patch *patch,
 883				  struct klp_object *obj)
 884{
 885	INIT_LIST_HEAD(&obj->func_list);
 886	kobject_init(&obj->kobj, &klp_ktype_object);
 887	list_add_tail(&obj->node, &patch->obj_list);
 888}
 889
 890static void klp_init_patch_early(struct klp_patch *patch)
 891{
 892	struct klp_object *obj;
 893	struct klp_func *func;
 894
 895	INIT_LIST_HEAD(&patch->list);
 896	INIT_LIST_HEAD(&patch->obj_list);
 897	kobject_init(&patch->kobj, &klp_ktype_patch);
 898	patch->enabled = false;
 899	patch->forced = false;
 900	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
 901	init_completion(&patch->finish);
 902
 903	klp_for_each_object_static(patch, obj) {
 904		klp_init_object_early(patch, obj);
 905
 906		klp_for_each_func_static(obj, func) {
 907			klp_init_func_early(obj, func);
 908		}
 909	}
 910}
 911
 912static int klp_init_patch(struct klp_patch *patch)
 913{
 914	struct klp_object *obj;
 915	int ret;
 916
 917	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
 918	if (ret)
 919		return ret;
 920
 921	if (patch->replace) {
 922		ret = klp_add_nops(patch);
 923		if (ret)
 924			return ret;
 925	}
 926
 927	klp_for_each_object(patch, obj) {
 928		ret = klp_init_object(patch, obj);
 929		if (ret)
 930			return ret;
 931	}
 932
 933	list_add_tail(&patch->list, &klp_patches);
 934
 935	return 0;
 936}
 937
 938static int __klp_disable_patch(struct klp_patch *patch)
 939{
 940	struct klp_object *obj;
 941
 942	if (WARN_ON(!patch->enabled))
 943		return -EINVAL;
 944
 945	if (klp_transition_patch)
 946		return -EBUSY;
 947
 948	klp_init_transition(patch, KLP_UNPATCHED);
 949
 950	klp_for_each_object(patch, obj)
 951		if (obj->patched)
 952			klp_pre_unpatch_callback(obj);
 953
 954	/*
 955	 * Enforce the order of the func->transition writes in
 956	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
 957	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
 958	 * is called shortly after klp_update_patch_state() switches the task,
 959	 * this ensures the handler sees that func->transition is set.
 960	 */
 961	smp_wmb();
 962
 963	klp_start_transition();
 964	patch->enabled = false;
 965	klp_try_complete_transition();
 966
 967	return 0;
 968}
 969
 970static int __klp_enable_patch(struct klp_patch *patch)
 971{
 972	struct klp_object *obj;
 973	int ret;
 974
 975	if (klp_transition_patch)
 976		return -EBUSY;
 977
 978	if (WARN_ON(patch->enabled))
 979		return -EINVAL;
 980
 981	pr_notice("enabling patch '%s'\n", patch->mod->name);
 982
 983	klp_init_transition(patch, KLP_PATCHED);
 984
 985	/*
 986	 * Enforce the order of the func->transition writes in
 987	 * klp_init_transition() and the ops->func_stack writes in
 988	 * klp_patch_object(), so that klp_ftrace_handler() will see the
 989	 * func->transition updates before the handler is registered and the
 990	 * new funcs become visible to the handler.
 991	 */
 992	smp_wmb();
 993
 994	klp_for_each_object(patch, obj) {
 995		if (!klp_is_object_loaded(obj))
 996			continue;
 997
 998		ret = klp_pre_patch_callback(obj);
 999		if (ret) {
1000			pr_warn("pre-patch callback failed for object '%s'\n",
1001				klp_is_module(obj) ? obj->name : "vmlinux");
1002			goto err;
1003		}
1004
1005		ret = klp_patch_object(obj);
1006		if (ret) {
1007			pr_warn("failed to patch object '%s'\n",
1008				klp_is_module(obj) ? obj->name : "vmlinux");
1009			goto err;
1010		}
1011	}
1012
1013	klp_start_transition();
1014	patch->enabled = true;
1015	klp_try_complete_transition();
1016
1017	return 0;
1018err:
1019	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1020
1021	klp_cancel_transition();
1022	return ret;
1023}
1024
1025/**
1026 * klp_enable_patch() - enable the livepatch
1027 * @patch:	patch to be enabled
1028 *
1029 * Initializes the data structure associated with the patch, creates the sysfs
1030 * interface, performs the needed symbol lookups and code relocations,
1031 * registers the patched functions with ftrace.
1032 *
1033 * This function is supposed to be called from the livepatch module_init()
1034 * callback.
1035 *
1036 * Return: 0 on success, otherwise error
1037 */
1038int klp_enable_patch(struct klp_patch *patch)
1039{
1040	int ret;
1041	struct klp_object *obj;
1042
1043	if (!patch || !patch->mod || !patch->objs)
1044		return -EINVAL;
1045
1046	klp_for_each_object_static(patch, obj) {
1047		if (!obj->funcs)
1048			return -EINVAL;
1049	}
1050
1051
1052	if (!is_livepatch_module(patch->mod)) {
1053		pr_err("module %s is not marked as a livepatch module\n",
1054		       patch->mod->name);
1055		return -EINVAL;
1056	}
1057
1058	if (!klp_initialized())
1059		return -ENODEV;
1060
1061	if (!klp_have_reliable_stack()) {
1062		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
1063		pr_warn("The livepatch transition may never complete.\n");
1064	}
1065
1066	mutex_lock(&klp_mutex);
1067
1068	if (!klp_is_patch_compatible(patch)) {
1069		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
1070			patch->mod->name);
1071		mutex_unlock(&klp_mutex);
1072		return -EINVAL;
1073	}
1074
1075	if (!try_module_get(patch->mod)) {
1076		mutex_unlock(&klp_mutex);
1077		return -ENODEV;
1078	}
1079
1080	klp_init_patch_early(patch);
1081
1082	ret = klp_init_patch(patch);
1083	if (ret)
1084		goto err;
1085
1086	ret = __klp_enable_patch(patch);
1087	if (ret)
1088		goto err;
1089
1090	mutex_unlock(&klp_mutex);
1091
1092	return 0;
1093
1094err:
1095	klp_free_patch_start(patch);
1096
1097	mutex_unlock(&klp_mutex);
1098
1099	klp_free_patch_finish(patch);
1100
1101	return ret;
1102}
1103EXPORT_SYMBOL_GPL(klp_enable_patch);
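/*
 * Editor's note: a minimal caller sketch, modeled on
 * samples/livepatch/livepatch-sample.c; the patched function and its
 * replacement are hypothetical:
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "do_work", .new_func = livepatch_do_work, },
 *		{ }
 *	};
 *	static struct klp_object objs[] = {
 *		{ .name = NULL, .funcs = funcs, },	// NULL name => vmlinux
 *		{ }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *	MODULE_INFO(livepatch, "Y");
 */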
1104
1105/*
1106 * This function unpatches objects from the replaced livepatches.
1107 *
1108 * We could be pretty aggressive here. It is called in the situation where
1109 * these structures are no longer accessed from the ftrace handler.
1110 * All functions are redirected by the klp_transition_patch. They
1111 * either use the new code or run the original code thanks to the
1112 * special nop function patches.
1113 *
1114 * The only exception is when the transition was forced. In this case,
1115 * klp_ftrace_handler() might still see the replaced patch on the stack.
1116 * Fortunately, it is carefully designed to work with removed functions
1117 * thanks to RCU. We only have to keep the patches on the system. Also
1118 * this is handled transparently by patch->module_put.
1119 */
1120void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
1121{
1122	struct klp_patch *old_patch;
1123
1124	klp_for_each_patch(old_patch) {
1125		if (old_patch == new_patch)
1126			return;
1127
1128		old_patch->enabled = false;
1129		klp_unpatch_objects(old_patch);
1130	}
1131}
1132
1133/*
1134 * This function removes the dynamically allocated 'nop' functions.
1135 *
1136 * We could be pretty aggressive. NOPs do not change the existing
1137 * behavior except for adding unnecessary delay in the ftrace handler.
1138 *
1139 * It is safe even when the transition was forced. The ftrace handler
1140 * will see a valid ops->func_stack entry thanks to RCU.
1141 *
1142 * We could even free the NOP structures. They must be the last entry
1143 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
1144 * It does the same as klp_synchronize_transition() to make sure that
1145 * nobody is inside the ftrace handler once the operation finishes.
1146 *
1147 * IMPORTANT: It must be called right after removing the replaced patches!
1148 */
1149void klp_discard_nops(struct klp_patch *new_patch)
1150{
1151	klp_unpatch_objects_dynamic(klp_transition_patch);
1152	klp_free_objects_dynamic(klp_transition_patch);
1153}
1154
1155/*
1156 * Remove parts of patches that touch a given kernel module. The list of
1157 * patches processed might be limited. When limit is NULL, all patches
1158 * will be handled.
1159 */
1160static void klp_cleanup_module_patches_limited(struct module *mod,
1161					       struct klp_patch *limit)
1162{
1163	struct klp_patch *patch;
1164	struct klp_object *obj;
1165
1166	klp_for_each_patch(patch) {
1167		if (patch == limit)
1168			break;
1169
1170		klp_for_each_object(patch, obj) {
1171			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1172				continue;
1173
1174			if (patch != klp_transition_patch)
1175				klp_pre_unpatch_callback(obj);
1176
1177			pr_notice("reverting patch '%s' on unloading module '%s'\n",
1178				  patch->mod->name, obj->mod->name);
1179			klp_unpatch_object(obj);
1180
1181			klp_post_unpatch_callback(obj);
1182
1183			klp_free_object_loaded(obj);
1184			break;
1185		}
1186	}
1187}
1188
1189int klp_module_coming(struct module *mod)
1190{
1191	int ret;
1192	struct klp_patch *patch;
1193	struct klp_object *obj;
1194
1195	if (WARN_ON(mod->state != MODULE_STATE_COMING))
1196		return -EINVAL;
1197
1198	if (!strcmp(mod->name, "vmlinux")) {
1199		pr_err("vmlinux.ko: invalid module name\n");
1200		return -EINVAL;
1201	}
1202
1203	mutex_lock(&klp_mutex);
1204	/*
1205	 * Each module has to know that klp_module_coming()
1206	 * has been called. We never know what module will
1207	 * get patched by a new patch.
1208	 */
1209	mod->klp_alive = true;
1210
1211	klp_for_each_patch(patch) {
1212		klp_for_each_object(patch, obj) {
1213			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1214				continue;
1215
1216			obj->mod = mod;
1217
1218			ret = klp_init_object_loaded(patch, obj);
1219			if (ret) {
1220				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1221					patch->mod->name, obj->mod->name, ret);
1222				goto err;
1223			}
1224
1225			pr_notice("applying patch '%s' to loading module '%s'\n",
1226				  patch->mod->name, obj->mod->name);
1227
1228			ret = klp_pre_patch_callback(obj);
1229			if (ret) {
1230				pr_warn("pre-patch callback failed for object '%s'\n",
1231					obj->name);
1232				goto err;
1233			}
1234
1235			ret = klp_patch_object(obj);
1236			if (ret) {
1237				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1238					patch->mod->name, obj->mod->name, ret);
1239
1240				klp_post_unpatch_callback(obj);
1241				goto err;
1242			}
1243
1244			if (patch != klp_transition_patch)
1245				klp_post_patch_callback(obj);
1246
1247			break;
1248		}
1249	}
1250
1251	mutex_unlock(&klp_mutex);
1252
1253	return 0;
1254
1255err:
1256	/*
1257	 * If a patch is unsuccessfully applied, return
1258	 * error to the module loader.
1259	 */
1260	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1261		patch->mod->name, obj->mod->name, obj->mod->name);
1262	mod->klp_alive = false;
1263	obj->mod = NULL;
1264	klp_cleanup_module_patches_limited(mod, patch);
1265	mutex_unlock(&klp_mutex);
1266
1267	return ret;
1268}
1269
1270void klp_module_going(struct module *mod)
1271{
1272	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1273		    mod->state != MODULE_STATE_COMING))
1274		return;
1275
1276	mutex_lock(&klp_mutex);
1277	/*
1278	 * Each module has to know that klp_module_going()
1279	 * has been called. We never know what module will
1280	 * get patched by a new patch.
1281	 */
1282	mod->klp_alive = false;
1283
1284	klp_cleanup_module_patches_limited(mod, NULL);
1285
1286	mutex_unlock(&klp_mutex);
1287}
1288
1289static int __init klp_init(void)
1290{
1291	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1292	if (!klp_root_kobj)
1293		return -ENOMEM;
1294
1295	return 0;
1296}
1297
1298module_init(klp_init);
v4.10.11
   1/*
   2 * core.c - Kernel Live Patching Core
   3 *
   4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   5 * Copyright (C) 2014 SUSE
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License
   9 * as published by the Free Software Foundation; either version 2
  10 * of the License, or (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22
  23#include <linux/module.h>
  24#include <linux/kernel.h>
  25#include <linux/mutex.h>
  26#include <linux/slab.h>
  27#include <linux/ftrace.h>
  28#include <linux/list.h>
  29#include <linux/kallsyms.h>
  30#include <linux/livepatch.h>
  31#include <linux/elf.h>
  32#include <linux/moduleloader.h>
  33#include <asm/cacheflush.h>
  34
  35/**
  36 * struct klp_ops - structure for tracking registered ftrace ops structs
  37 *
  38 * A single ftrace_ops is shared between all enabled replacement functions
  39 * (klp_func structs) which have the same old_addr.  This allows the switch
  40 * between function versions to happen instantaneously by updating the klp_ops
  41 * struct's func_stack list.  The winner is the klp_func at the top of the
  42 * func_stack (front of the list).
  43 *
  44 * @node:	node for the global klp_ops list
  45 * @func_stack:	list head for the stack of klp_func's (active func is on top)
  46 * @fops:	registered ftrace ops struct
  47 */
  48struct klp_ops {
  49	struct list_head node;
  50	struct list_head func_stack;
  51	struct ftrace_ops fops;
  52};
  53
  54/*
  55 * The klp_mutex protects the global lists and state transitions of any
  56 * structure reachable from them.  References to any structure must be obtained
  57 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
  58 * ensure it gets consistent data).
  59 */
  60static DEFINE_MUTEX(klp_mutex);
  61
  62static LIST_HEAD(klp_patches);
  63static LIST_HEAD(klp_ops);
  64
  65static struct kobject *klp_root_kobj;
  66
  67static struct klp_ops *klp_find_ops(unsigned long old_addr)
  68{
  69	struct klp_ops *ops;
  70	struct klp_func *func;
  71
  72	list_for_each_entry(ops, &klp_ops, node) {
  73		func = list_first_entry(&ops->func_stack, struct klp_func,
  74					stack_node);
  75		if (func->old_addr == old_addr)
  76			return ops;
  77	}
  78
  79	return NULL;
  80}
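/*
 * Editor's note: illustrative stacking behavior. If patch A and then patch B
 * patch the same old_addr, ops->func_stack holds B then A (newest first), so
 * klp_ftrace_handler() redirects to B's new_func; disabling B pops it off
 * the stack and A takes effect again.
 */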
  81
  82static bool klp_is_module(struct klp_object *obj)
  83{
  84	return obj->name;
  85}
  86
  87static bool klp_is_object_loaded(struct klp_object *obj)
  88{
  89	return !obj->name || obj->mod;
  90}
  91
  92/* sets obj->mod if object is not vmlinux and module is found */
  93static void klp_find_object_module(struct klp_object *obj)
  94{
  95	struct module *mod;
  96
  97	if (!klp_is_module(obj))
  98		return;
  99
 100	mutex_lock(&module_mutex);
 101	/*
 102	 * We do not want to block removal of patched modules and therefore
 103	 * we do not take a reference here. The patches are removed by
 104	 * klp_module_going() instead.
 105	 */
 106	mod = find_module(obj->name);
 107	/*
 108	 * Do not interfere with the work of klp_module_coming() and klp_module_going().
 109	 * Note that the patch might still be needed before klp_module_going()
 110	 * is called. Module functions can be called even in the GOING state
 111	 * until mod->exit() finishes. This is especially important for
 112	 * patches that modify the semantics of the functions.
 113	 */
 114	if (mod && mod->klp_alive)
 115		obj->mod = mod;
 116
 117	mutex_unlock(&module_mutex);
 118}
 119
 120/* klp_mutex must be held by caller */
 121static bool klp_is_patch_registered(struct klp_patch *patch)
 122{
 123	struct klp_patch *mypatch;
 124
 125	list_for_each_entry(mypatch, &klp_patches, list)
 126		if (mypatch == patch)
 127			return true;
 128
 129	return false;
 130}
 131
 132static bool klp_initialized(void)
 133{
 134	return !!klp_root_kobj;
 135}
 136
 137struct klp_find_arg {
 138	const char *objname;
 139	const char *name;
 140	unsigned long addr;
 141	unsigned long count;
 142	unsigned long pos;
 143};
 144
 145static int klp_find_callback(void *data, const char *name,
 146			     struct module *mod, unsigned long addr)
 147{
 148	struct klp_find_arg *args = data;
 149
 150	if ((mod && !args->objname) || (!mod && args->objname))
 151		return 0;
 152
 153	if (strcmp(args->name, name))
 154		return 0;
 155
 156	if (args->objname && strcmp(args->objname, mod->name))
 157		return 0;
 158
 159	args->addr = addr;
 160	args->count++;
 161
 162	/*
 163	 * Finish the search when the symbol is found for the desired position
 164	 * or the position is not defined for a non-unique symbol.
 165	 */
 166	if ((args->pos && (args->count == args->pos)) ||
 167	    (!args->pos && (args->count > 1)))
 168		return 1;
 169
 170	return 0;
 171}
 172
 173static int klp_find_object_symbol(const char *objname, const char *name,
 174				  unsigned long sympos, unsigned long *addr)
 175{
 176	struct klp_find_arg args = {
 177		.objname = objname,
 178		.name = name,
 179		.addr = 0,
 180		.count = 0,
 181		.pos = sympos,
 182	};
 183
 184	mutex_lock(&module_mutex);
 185	kallsyms_on_each_symbol(klp_find_callback, &args);
 186	mutex_unlock(&module_mutex);
 187
 188	/*
 189	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
 190	 * otherwise ensure the symbol position count matches sympos.
 191	 */
 192	if (args.addr == 0)
 193		pr_err("symbol '%s' not found in symbol table\n", name);
 194	else if (args.count > 1 && sympos == 0) {
 195		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
 196		       name, objname);
 197	} else if (sympos != args.count && sympos > 0) {
 198		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
 199		       sympos, name, objname ? objname : "vmlinux");
 200	} else {
 201		*addr = args.addr;
 202		return 0;
 203	}
 204
 205	*addr = 0;
 206	return -EINVAL;
 207}
 208
 209static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 210{
 211	int i, cnt, vmlinux, ret;
 212	char objname[MODULE_NAME_LEN];
 213	char symname[KSYM_NAME_LEN];
 214	char *strtab = pmod->core_kallsyms.strtab;
 215	Elf_Rela *relas;
 216	Elf_Sym *sym;
 217	unsigned long sympos, addr;
 218
 219	/*
 220	 * Since the field widths for objname and symname in the sscanf()
 221	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 222	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 223	 * and KSYM_NAME_LEN have the values we expect them to have.
 224	 *
 225	 * Because the value of MODULE_NAME_LEN can differ among architectures,
 226	 * we use the smallest/strictest upper bound possible (56, based on
 227	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 228	 */
 229	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
 230
 231	relas = (Elf_Rela *) relasec->sh_addr;
 232	/* For each rela in this klp relocation section */
 233	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 234		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
 235		if (sym->st_shndx != SHN_LIVEPATCH) {
 236			pr_err("symbol %s is not marked as a livepatch symbol\n",
 237			       strtab + sym->st_name);
 238			return -EINVAL;
 239		}
 240
 241		/* Format: .klp.sym.objname.symname,sympos */
 242		cnt = sscanf(strtab + sym->st_name,
 243			     ".klp.sym.%55[^.].%127[^,],%lu",
 244			     objname, symname, &sympos);
 245		if (cnt != 3) {
 246			pr_err("symbol %s has an incorrectly formatted name\n",
 247			       strtab + sym->st_name);
 248			return -EINVAL;
 249		}
 250
 251		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
 252		vmlinux = !strcmp(objname, "vmlinux");
 253		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
 254					     symname, sympos, &addr);
 255		if (ret)
 256			return ret;
 257
 258		sym->st_value = addr;
 259	}
 260
 261	return 0;
 262}
 263
 264static int klp_write_object_relocations(struct module *pmod,
 265					struct klp_object *obj)
 266{
 267	int i, cnt, ret = 0;
 268	const char *objname, *secname;
 269	char sec_objname[MODULE_NAME_LEN];
 270	Elf_Shdr *sec;
 271
 272	if (WARN_ON(!klp_is_object_loaded(obj)))
 273		return -EINVAL;
 274
 275	objname = klp_is_module(obj) ? obj->name : "vmlinux";
 276
 277	/* For each klp relocation section */
 278	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
 279		sec = pmod->klp_info->sechdrs + i;
 280		secname = pmod->klp_info->secstrings + sec->sh_name;
 281		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
 282			continue;
 283
 284		/*
 285		 * Format: .klp.rela.sec_objname.section_name
 286		 * See comment in klp_resolve_symbols() for an explanation
 287		 * of the selected field width value.
 288		 */
 289		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
 290		if (cnt != 1) {
 291			pr_err("section %s has an incorrectly formatted name\n",
 292			       secname);
 293			ret = -EINVAL;
 294			break;
 295		}
 296
 297		if (strcmp(objname, sec_objname))
 298			continue;
 299
 300		ret = klp_resolve_symbols(sec, pmod);
 301		if (ret)
 302			break;
 303
 304		ret = apply_relocate_add(pmod->klp_info->sechdrs,
 305					 pmod->core_kallsyms.strtab,
 306					 pmod->klp_info->symndx, i, pmod);
 307		if (ret)
 308			break;
 309	}
 310
 311	return ret;
 312}
 313
 314static void notrace klp_ftrace_handler(unsigned long ip,
 315				       unsigned long parent_ip,
 316				       struct ftrace_ops *fops,
 317				       struct pt_regs *regs)
 318{
 319	struct klp_ops *ops;
 320	struct klp_func *func;
 321
 322	ops = container_of(fops, struct klp_ops, fops);
 323
 324	rcu_read_lock();
 325	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 326				      stack_node);
 327	if (WARN_ON_ONCE(!func))
 328		goto unlock;
 329
 330	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 331unlock:
 332	rcu_read_unlock();
 333}
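/*
 * Editor's note: the net effect, illustratively. A call to a patched
 * function enters the ftrace trampoline at its patch site, the handler
 * above rewrites the saved program counter via klp_arch_set_pc() to the
 * new_func at the top of func_stack, and the caller transparently runs the
 * replacement.
 */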
 334
 335/*
 336 * Convert a function address into the appropriate ftrace location.
 337 *
 338 * Usually this is just the address of the function, but on some architectures
 339 * it's more complicated so allow them to provide a custom behaviour.
 340 */
 341#ifndef klp_get_ftrace_location
 342static unsigned long klp_get_ftrace_location(unsigned long faddr)
 343{
 344	return faddr;
 345}
 346#endif
 347
 348static void klp_disable_func(struct klp_func *func)
 349{
 350	struct klp_ops *ops;
 351
 352	if (WARN_ON(func->state != KLP_ENABLED))
 353		return;
 354	if (WARN_ON(!func->old_addr))
 355		return;
 356
 357	ops = klp_find_ops(func->old_addr);
 358	if (WARN_ON(!ops))
 359		return;
 360
 361	if (list_is_singular(&ops->func_stack)) {
 362		unsigned long ftrace_loc;
 363
 364		ftrace_loc = klp_get_ftrace_location(func->old_addr);
 365		if (WARN_ON(!ftrace_loc))
 366			return;
 367
 368		WARN_ON(unregister_ftrace_function(&ops->fops));
 369		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
 370
 371		list_del_rcu(&func->stack_node);
 372		list_del(&ops->node);
 373		kfree(ops);
 374	} else {
 375		list_del_rcu(&func->stack_node);
 376	}
 377
 378	func->state = KLP_DISABLED;
 379}
 380
 381static int klp_enable_func(struct klp_func *func)
 382{
 383	struct klp_ops *ops;
 384	int ret;
 385
 386	if (WARN_ON(!func->old_addr))
 387		return -EINVAL;
 388
 389	if (WARN_ON(func->state != KLP_DISABLED))
 390		return -EINVAL;
 391
 392	ops = klp_find_ops(func->old_addr);
 393	if (!ops) {
 394		unsigned long ftrace_loc;
 395
 396		ftrace_loc = klp_get_ftrace_location(func->old_addr);
 397		if (!ftrace_loc) {
 398			pr_err("failed to find location for function '%s'\n",
 399				func->old_name);
 400			return -EINVAL;
 401		}
 402
 403		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 404		if (!ops)
 405			return -ENOMEM;
 406
 407		ops->fops.func = klp_ftrace_handler;
 408		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
 409				  FTRACE_OPS_FL_DYNAMIC |
 410				  FTRACE_OPS_FL_IPMODIFY;
 411
 412		list_add(&ops->node, &klp_ops);
 413
 414		INIT_LIST_HEAD(&ops->func_stack);
 415		list_add_rcu(&func->stack_node, &ops->func_stack);
 416
 417		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
 418		if (ret) {
 419			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 420			       func->old_name, ret);
 421			goto err;
 422		}
 423
 424		ret = register_ftrace_function(&ops->fops);
 425		if (ret) {
 426			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 427			       func->old_name, ret);
 428			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
 429			goto err;
 430		}
 431
 432
 433	} else {
 434		list_add_rcu(&func->stack_node, &ops->func_stack);
 435	}
 436
 437	func->state = KLP_ENABLED;
 438
 439	return 0;
 440
 441err:
 442	list_del_rcu(&func->stack_node);
 443	list_del(&ops->node);
 444	kfree(ops);
 445	return ret;
 446}
 447
 448static void klp_disable_object(struct klp_object *obj)
 449{
 450	struct klp_func *func;
 451
 452	klp_for_each_func(obj, func)
 453		if (func->state == KLP_ENABLED)
 454			klp_disable_func(func);
 455
 456	obj->state = KLP_DISABLED;
 457}
 458
 459static int klp_enable_object(struct klp_object *obj)
 460{
 461	struct klp_func *func;
 462	int ret;
 463
 464	if (WARN_ON(obj->state != KLP_DISABLED))
 465		return -EINVAL;
 466
 467	if (WARN_ON(!klp_is_object_loaded(obj)))
 468		return -EINVAL;
 469
 470	klp_for_each_func(obj, func) {
 471		ret = klp_enable_func(func);
 472		if (ret) {
 473			klp_disable_object(obj);
 474			return ret;
 475		}
 476	}
 477	obj->state = KLP_ENABLED;
 478
 479	return 0;
 480}
 481
 482static int __klp_disable_patch(struct klp_patch *patch)
 483{
 484	struct klp_object *obj;
 485
 486	/* enforce stacking: only the last enabled patch can be disabled */
 487	if (!list_is_last(&patch->list, &klp_patches) &&
 488	    list_next_entry(patch, list)->state == KLP_ENABLED)
 489		return -EBUSY;
 490
 491	pr_notice("disabling patch '%s'\n", patch->mod->name);
 492
 493	klp_for_each_object(patch, obj) {
 494		if (obj->state == KLP_ENABLED)
 495			klp_disable_object(obj);
 496	}
 497
 498	patch->state = KLP_DISABLED;
 499
 500	return 0;
 501}
 502
 503/**
 504 * klp_disable_patch() - disables a registered patch
 505 * @patch:	The registered, enabled patch to be disabled
 506 *
 507 * Unregisters the patched functions from ftrace.
 508 *
 509 * Return: 0 on success, otherwise error
 510 */
 511int klp_disable_patch(struct klp_patch *patch)
 512{
 513	int ret;
 514
 515	mutex_lock(&klp_mutex);
 516
 517	if (!klp_is_patch_registered(patch)) {
 518		ret = -EINVAL;
 519		goto err;
 520	}
 521
 522	if (patch->state == KLP_DISABLED) {
 523		ret = -EINVAL;
 524		goto err;
 525	}
 526
 527	ret = __klp_disable_patch(patch);
 528
 529err:
 530	mutex_unlock(&klp_mutex);
 531	return ret;
 532}
 533EXPORT_SYMBOL_GPL(klp_disable_patch);
 534
 535static int __klp_enable_patch(struct klp_patch *patch)
 536{
 537	struct klp_object *obj;
 538	int ret;
 539
 540	if (WARN_ON(patch->state != KLP_DISABLED))
 541		return -EINVAL;
 542
 543	/* enforce stacking: only the first disabled patch can be enabled */
 544	if (patch->list.prev != &klp_patches &&
 545	    list_prev_entry(patch, list)->state == KLP_DISABLED)
 546		return -EBUSY;
 547
 548	pr_notice("enabling patch '%s'\n", patch->mod->name);
 549
 550	klp_for_each_object(patch, obj) {
 551		if (!klp_is_object_loaded(obj))
 552			continue;
 553
 554		ret = klp_enable_object(obj);
 555		if (ret)
 556			goto unregister;
 557	}
 558
 559	patch->state = KLP_ENABLED;
 560
 561	return 0;
 562
 563unregister:
 564	WARN_ON(__klp_disable_patch(patch));
 565	return ret;
 566}
 567
 568/**
 569 * klp_enable_patch() - enables a registered patch
 570 * @patch:	The registered, disabled patch to be enabled
 571 *
 572 * Performs the needed symbol lookups and code relocations,
 573 * then registers the patched functions with ftrace.
 574 *
 575 * Return: 0 on success, otherwise error
 576 */
 577int klp_enable_patch(struct klp_patch *patch)
 578{
 579	int ret;
 580
 581	mutex_lock(&klp_mutex);
 582
 583	if (!klp_is_patch_registered(patch)) {
 584		ret = -EINVAL;
 585		goto err;
 586	}
 587
 588	ret = __klp_enable_patch(patch);
 589
 590err:
 591	mutex_unlock(&klp_mutex);
 592	return ret;
 593}
 594EXPORT_SYMBOL_GPL(klp_enable_patch);
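/*
 * Editor's note: in this version of the API, a patch module first calls
 * klp_register_patch() (not shown in this excerpt) from module_init(); the
 * patch is then enabled either via klp_enable_patch() or by writing 1 to
 * the patch's sysfs "enabled" file.
 */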
 595
 596/*
 597 * Sysfs Interface
 598 *
 599 * /sys/kernel/livepatch
 600 * /sys/kernel/livepatch/<patch>
 601 * /sys/kernel/livepatch/<patch>/enabled
 602 * /sys/kernel/livepatch/<patch>/<object>
 603 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 604 */
 605
 606static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 607			     const char *buf, size_t count)
 608{
 609	struct klp_patch *patch;
 610	int ret;
 611	unsigned long val;
 612
 613	ret = kstrtoul(buf, 10, &val);
 614	if (ret)
 615		return -EINVAL;
 616
 617	if (val != KLP_DISABLED && val != KLP_ENABLED)
 618		return -EINVAL;
 619
 620	patch = container_of(kobj, struct klp_patch, kobj);
 621
 622	mutex_lock(&klp_mutex);
 623
 624	if (val == patch->state) {
 625		/* already in requested state */
 626		ret = -EINVAL;
 627		goto err;
 628	}
 629
 630	if (val == KLP_ENABLED) {
 631		ret = __klp_enable_patch(patch);
 632		if (ret)
 633			goto err;
 634	} else {
 635		ret = __klp_disable_patch(patch);
 636		if (ret)
 637			goto err;
 638	}
 639
 640	mutex_unlock(&klp_mutex);
 641
 642	return count;
 643
 644err:
 645	mutex_unlock(&klp_mutex);
 646	return ret;
 647}
 648
 649static ssize_t enabled_show(struct kobject *kobj,
 650			    struct kobj_attribute *attr, char *buf)
 651{
 652	struct klp_patch *patch;
 653
 654	patch = container_of(kobj, struct klp_patch, kobj);
 655	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
 656}
 657
 658static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 659static struct attribute *klp_patch_attrs[] = {
 660	&enabled_kobj_attr.attr,
 661	NULL
 662};
 663
 664static void klp_kobj_release_patch(struct kobject *kobj)
 665{
 666	/*
 667	 * Once we have a consistency model we'll need to module_put() the
 668	 * patch module here.  See klp_register_patch() for more details.
 669	 */
 670}
 671
 672static struct kobj_type klp_ktype_patch = {
 673	.release = klp_kobj_release_patch,
 674	.sysfs_ops = &kobj_sysfs_ops,
 675	.default_attrs = klp_patch_attrs,
 676};
 677
 678static void klp_kobj_release_object(struct kobject *kobj)
 679{
 680}
 681
 682static struct kobj_type klp_ktype_object = {
 683	.release = klp_kobj_release_object,
 684	.sysfs_ops = &kobj_sysfs_ops,
 685};
 686
 687static void klp_kobj_release_func(struct kobject *kobj)
 688{
 689}
 690
 691static struct kobj_type klp_ktype_func = {
 692	.release = klp_kobj_release_func,
 693	.sysfs_ops = &kobj_sysfs_ops,
 694};
 695
 696/*
 697 * Free all functions' kobjects in the array up to some limit. When limit is
 698 * NULL, all kobjects are freed.
 699 */
 700static void klp_free_funcs_limited(struct klp_object *obj,
 701				   struct klp_func *limit)
 702{
 703	struct klp_func *func;
 704
 705	for (func = obj->funcs; func->old_name && func != limit; func++)
 706		kobject_put(&func->kobj);
 707}
 708
 709/* Clean up when a patched object is unloaded */
 710static void klp_free_object_loaded(struct klp_object *obj)
 711{
 712	struct klp_func *func;
 713
 714	obj->mod = NULL;
 715
 716	klp_for_each_func(obj, func)
 717		func->old_addr = 0;
 718}
 719
 720/*
 721 * Free all objects' kobjects in the array up to some limit. When limit is
 722 * NULL, all kobjects are freed.
 723 */
 724static void klp_free_objects_limited(struct klp_patch *patch,
 725				     struct klp_object *limit)
 726{
 727	struct klp_object *obj;
 728
 729	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
 730		klp_free_funcs_limited(obj, NULL);
 731		kobject_put(&obj->kobj);
 732	}
 733}
 734
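    /* Must be called with klp_mutex held; unlinks the patch from klp_patches. */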
 735static void klp_free_patch(struct klp_patch *patch)
 736{
 737	klp_free_objects_limited(patch, NULL);
 738	if (!list_empty(&patch->list))
 739		list_del(&patch->list);
 740	kobject_put(&patch->kobj);
 741}
 742
 743static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 744{
 745	if (!func->old_name || !func->new_func)
 746		return -EINVAL;
 747
 748	INIT_LIST_HEAD(&func->stack_node);
 749	func->state = KLP_DISABLED;
 750
 751	/* The format for the sysfs directory is <function,sympos> where sympos
 752	 * is the nth occurrence of this symbol in kallsyms for the patched
 753	 * object. If the user selects 0 for old_sympos, then 1 will be used
 754	 * since a unique symbol will be the first occurrence.
 755	 */
 756	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
 757				    &obj->kobj, "%s,%lu", func->old_name,
 758				    func->old_sympos ? func->old_sympos : 1);
 759}
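    /*
     * For example (hypothetical names): patching the second of two static
     * "queue_add" symbols in an object means setting old_sympos = 2, which
     * yields the sysfs directory
     * /sys/kernel/livepatch/<patch>/<object>/queue_add,2; leaving
     * old_sympos at 0 for a unique symbol yields queue_add,1.
     */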
 760
 761/* Arches may override this to finish any remaining arch-specific tasks */
 762void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
 763					struct klp_object *obj)
 764{
 765}
 766
 767/* parts of the initialization that are done only when the object is loaded */
 768static int klp_init_object_loaded(struct klp_patch *patch,
 769				  struct klp_object *obj)
 770{
 771	struct klp_func *func;
 772	int ret;
 773
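    	/*
    	 * Relocations are written into the patch module's own text, so its
    	 * read-only protection is lifted for the duration of
    	 * klp_write_object_relocations() and restored afterwards.
    	 */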
 774	module_disable_ro(patch->mod);
 775	ret = klp_write_object_relocations(patch->mod, obj);
 776	if (ret) {
 777		module_enable_ro(patch->mod, true);
 778		return ret;
 779	}
 780
 781	arch_klp_init_object_loaded(patch, obj);
 782	module_enable_ro(patch->mod, true);
 783
 784	klp_for_each_func(obj, func) {
 785		ret = klp_find_object_symbol(obj->name, func->old_name,
 786					     func->old_sympos,
 787					     &func->old_addr);
 788		if (ret)
 789			return ret;
 790	}
 791
 792	return 0;
 793}
 794
 795static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 796{
 797	struct klp_func *func;
 798	int ret;
 799	const char *name;
 800
 801	if (!obj->funcs)
 802		return -EINVAL;
 803
 804	obj->state = KLP_DISABLED;
 805	obj->mod = NULL;
 806
 807	klp_find_object_module(obj);
 808
 809	name = klp_is_module(obj) ? obj->name : "vmlinux";
 810	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
 811				   &patch->kobj, "%s", name);
 812	if (ret)
 813		return ret;
 814
 815	klp_for_each_func(obj, func) {
 816		ret = klp_init_func(obj, func);
 817		if (ret)
 818			goto free;
 819	}
 820
 821	if (klp_is_object_loaded(obj)) {
 822		ret = klp_init_object_loaded(patch, obj);
 823		if (ret)
 824			goto free;
 825	}
 826
 827	return 0;
 828
 829free:
 830	klp_free_funcs_limited(obj, func);
 831	kobject_put(&obj->kobj);
 832	return ret;
 833}
 834
 835static int klp_init_patch(struct klp_patch *patch)
 836{
 837	struct klp_object *obj;
 838	int ret;
 839
 840	if (!patch->objs)
 841		return -EINVAL;
 842
 843	mutex_lock(&klp_mutex);
 844
 845	patch->state = KLP_DISABLED;
 846
 847	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
 848				   klp_root_kobj, "%s", patch->mod->name);
 849	if (ret)
 850		goto unlock;
 851
 852	klp_for_each_object(patch, obj) {
 853		ret = klp_init_object(patch, obj);
 854		if (ret)
 855			goto free;
 856	}
 857
 858	list_add_tail(&patch->list, &klp_patches);
 859
 860	mutex_unlock(&klp_mutex);
 861
 862	return 0;
 863
 864free:
 865	klp_free_objects_limited(patch, obj);
 866	kobject_put(&patch->kobj);
 867unlock:
 868	mutex_unlock(&klp_mutex);
 869	return ret;
 870}
 871
 872/**
 873 * klp_unregister_patch() - unregisters a patch
 874 * @patch:	Disabled patch to be unregistered
 875 *
 876 * Frees the data structures and removes the sysfs interface.
 877 *
 878 * Return: 0 on success, otherwise error
 879 */
 880int klp_unregister_patch(struct klp_patch *patch)
 881{
 882	int ret = 0;
 883
 884	mutex_lock(&klp_mutex);
 885
 886	if (!klp_is_patch_registered(patch)) {
 887		ret = -EINVAL;
 888		goto out;
 889	}
 890
 891	if (patch->state == KLP_ENABLED) {
 892		ret = -EBUSY;
 893		goto out;
 894	}
 895
 896	klp_free_patch(patch);
 897
 898out:
 899	mutex_unlock(&klp_mutex);
 900	return ret;
 901}
 902EXPORT_SYMBOL_GPL(klp_unregister_patch);
 903
 904/**
 905 * klp_register_patch() - registers a patch
 906 * @patch:	Patch to be registered
 907 *
 908 * Initializes the data structure associated with the patch and
 909 * creates the sysfs interface.
 910 *
 911 * Return: 0 on success, otherwise error
 912 */
 913int klp_register_patch(struct klp_patch *patch)
 914{
 915	int ret;
 916
 917	if (!patch || !patch->mod)
 918		return -EINVAL;
 919
 920	if (!is_livepatch_module(patch->mod)) {
 921		pr_err("module %s is not marked as a livepatch module\n",
 922		       patch->mod->name);
 923		return -EINVAL;
 924	}
 925
 926	if (!klp_initialized())
 927		return -ENODEV;
 928
 929	/*
 930	 * A reference is taken on the patch module to prevent it from being
 931	 * unloaded.  Right now, we don't allow patch modules to unload since
 932	 * there is currently no method to determine if a thread is still
 933	 * running in the patched code contained in the patch module once
 934	 * the ftrace registration is successful.
 935	 */
 936	if (!try_module_get(patch->mod))
 937		return -ENODEV;
 938
 939	ret = klp_init_patch(patch);
 940	if (ret)
 941		module_put(patch->mod);
 942
 943	return ret;
 944}
 945EXPORT_SYMBOL_GPL(klp_register_patch);
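    /*
     * Sketch of the structures a caller registers (modeled on
     * samples/livepatch/livepatch-sample.c, where
     * livepatch_cmdline_proc_show() is the module's replacement function).
     * A NULL object name denotes vmlinux:
     *
     *	static struct klp_func funcs[] = {
     *		{
     *			.old_name = "cmdline_proc_show",
     *			.new_func = livepatch_cmdline_proc_show,
     *		}, { }
     *	};
     *
     *	static struct klp_object objs[] = {
     *		{
     *			.name = NULL,
     *			.funcs = funcs,
     *		}, { }
     *	};
     *
     *	static struct klp_patch patch = {
     *		.mod = THIS_MODULE,
     *		.objs = objs,
     *	};
     */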
 946
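    /*
     * Called by the module loader while mod is in MODULE_STATE_COMING, i.e.
     * after its sections are in place but before its init function runs, so
     * relevant patches can be applied before any module code executes.
     */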
 947int klp_module_coming(struct module *mod)
 948{
 949	int ret;
 950	struct klp_patch *patch;
 951	struct klp_object *obj;
 952
 953	if (WARN_ON(mod->state != MODULE_STATE_COMING))
 954		return -EINVAL;
 955
 956	mutex_lock(&klp_mutex);
 957	/*
 958	 * Each module has to know that klp_module_coming()
 959	 * has been called. We never know which module will
 960	 * get patched by a new patch.
 961	 */
 962	mod->klp_alive = true;
 963
 964	list_for_each_entry(patch, &klp_patches, list) {
 965		klp_for_each_object(patch, obj) {
 966			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 967				continue;
 968
 969			obj->mod = mod;
 970
 971			ret = klp_init_object_loaded(patch, obj);
 972			if (ret) {
 973				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
 974					patch->mod->name, obj->mod->name, ret);
 975				goto err;
 976			}
 977
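    			/*
    			 * A disabled patch only needs its object
    			 * initialized; there is nothing to enable yet.
    			 */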
 978			if (patch->state == KLP_DISABLED)
 979				break;
 980
 981			pr_notice("applying patch '%s' to loading module '%s'\n",
 982				  patch->mod->name, obj->mod->name);
 983
 984			ret = klp_enable_object(obj);
 985			if (ret) {
 986				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
 987					patch->mod->name, obj->mod->name, ret);
 988				goto err;
 989			}
 990
 991			break;
 992		}
 993	}
 994
 995	mutex_unlock(&klp_mutex);
 996
 997	return 0;
 998
 999err:
1000	/*
1001	 * If a patch is unsuccessfully applied, return an
1002	 * error to the module loader.
1003	 */
1004	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1005		patch->mod->name, obj->mod->name, obj->mod->name);
1006	mod->klp_alive = false;
1007	klp_free_object_loaded(obj);
1008	mutex_unlock(&klp_mutex);
1009
1010	return ret;
1011}
1012
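    /*
     * Called on module removal, and also when a module load fails after
     * klp_module_coming() succeeded; hence MODULE_STATE_COMING is accepted
     * in the sanity check below.
     */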
1013void klp_module_going(struct module *mod)
1014{
1015	struct klp_patch *patch;
1016	struct klp_object *obj;
1017
1018	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1019		    mod->state != MODULE_STATE_COMING))
1020		return;
1021
1022	mutex_lock(&klp_mutex);
1023	/*
1024	 * Each module has to know that klp_module_going()
1025	 * has been called. We never know which module will
1026	 * get patched by a new patch.
1027	 */
1028	mod->klp_alive = false;
1029
1030	list_for_each_entry(patch, &klp_patches, list) {
1031		klp_for_each_object(patch, obj) {
1032			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1033				continue;
1034
1035			if (patch->state != KLP_DISABLED) {
1036				pr_notice("reverting patch '%s' on unloading module '%s'\n",
1037					  patch->mod->name, obj->mod->name);
1038				klp_disable_object(obj);
1039			}
1040
1041			klp_free_object_loaded(obj);
1042			break;
1043		}
1044	}
1045
1046	mutex_unlock(&klp_mutex);
1047}
1048
1049static int __init klp_init(void)
1050{
1051	int ret;
1052
1053	ret = klp_check_compiler_support();
1054	if (ret) {
1055		pr_info("Your compiler is too old; turning off.\n");
1056		return -EINVAL;
1057	}
1058
1059	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1060	if (!klp_root_kobj)
1061		return -ENOMEM;
1062
1063	return 0;
1064}
1065
1066module_init(klp_init);