// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
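
/*
 * For example (with a hypothetical non-unique symbol): if a static function
 * "foo_handler" appeared twice in vmlinux's kallsyms, a lookup with sympos 0
 * would fail as ambiguous, while a non-zero sympos selects one occurrence:
 *
 *	unsigned long addr;
 *	int ret = klp_find_object_symbol(NULL, "foo_handler", 2, &addr);
 *
 * A NULL objname means vmlinux; sympos 1 would select the first occurrence.
 */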

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
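
/*
 * To illustrate the naming scheme resolved above (examples in the spirit of
 * Documentation/livepatch/module-elf-format.rst; object and section names
 * are illustrative):
 *
 *	.klp.rela.ext4.text.unlikely	- klp relocations for the "ext4"
 *					  object's .text.unlikely section
 *	.klp.sym.vmlinux.printk,0	- klp symbol for printk() in vmlinux,
 *					  sympos 0 (symbol is unique)
 */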

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
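
/*
 * Typical interaction from user space might look like this (illustrative;
 * "livepatch_sample" stands for whatever patch module is loaded):
 *
 *	# disable a patch, or reverse its pending transition:
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# check whether a transition is still in progress:
 *	cat /sys/kernel/livepatch/livepatch_sample/transition
 *	# force the transition to complete (may compromise consistency):
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/force
 */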
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It might
	 * be necessary to complete the transition without forcing it and
	 * breaking system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}
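
/*
 * A cumulative ("replace") patch opts in by setting the .replace flag in its
 * klp_patch definition; a minimal sketch (illustrative field values only):
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *		.replace = true,
 *	};
 *
 * When such a patch is initialized, klp_add_nops() above extends it with
 * 'nop' entries for every function patched by the currently used patches,
 * so that enabling it also reverts everything the new patch does not carry.
 */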

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work allows waiting until the interface is destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}
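
/*
 * E.g. a patched vmlinux function "foo_handler" (hypothetical name) with
 * old_sympos left at 0 ends up as:
 *
 *	/sys/kernel/livepatch/<patch>/vmlinux/foo_handler,1
 */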

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
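
/*
 * A minimal caller, modeled on samples/livepatch/livepatch-sample.c
 * (illustrative sketch; the patched function is the one from that sample):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *
 * Leaving obj->name NULL targets vmlinux. The patch module must also declare
 * MODULE_INFO(livepatch, "Y") so that is_livepatch_module() above accepts it.
 */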

/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or they are in
 * the original code because of the special 'nop' function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patch modules on the system.
 * This is handled transparently via patch->forced: the module reference
 * of a forced patch is never dropped, see klp_free_patch_finish().
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);