v4.6
  1/*
  2 * core.c - Kernel Live Patching Core
  3 *
  4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
  5 * Copyright (C) 2014 SUSE
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License
  9 * as published by the Free Software Foundation; either version 2
 10 * of the License, or (at your option) any later version.
 11 *
 12 * This program is distributed in the hope that it will be useful,
 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 15 * GNU General Public License for more details.
 16 *
 17 * You should have received a copy of the GNU General Public License
 18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 19 */
 20
 21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 22
 23#include <linux/module.h>
 24#include <linux/kernel.h>
 25#include <linux/mutex.h>
 26#include <linux/slab.h>
 27#include <linux/ftrace.h>
 28#include <linux/list.h>
 29#include <linux/kallsyms.h>
 30#include <linux/livepatch.h>
 31#include <asm/cacheflush.h>
 32
 33/**
 34 * struct klp_ops - structure for tracking registered ftrace ops structs
 35 *
 36 * A single ftrace_ops is shared between all enabled replacement functions
 37 * (klp_func structs) which have the same old_addr.  This allows the switch
 38 * between function versions to happen instantaneously by updating the klp_ops
 39 * struct's func_stack list.  The winner is the klp_func at the top of the
 40 * func_stack (front of the list).
 41 *
 42 * @node:	node for the global klp_ops list
 43 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 44 * @fops:	registered ftrace ops struct
 45 */
 46struct klp_ops {
 47	struct list_head node;
 48	struct list_head func_stack;
 49	struct ftrace_ops fops;
 50};
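/*
 * [Editor's illustration -- not part of core.c]
 * Two patches that replace the same old_addr share one klp_ops and stack
 * their klp_func entries, e.g. after enabling patch1 and then patch2:
 *
 *   klp_ops for foo()'s old_addr
 *     func_stack: foo_v2 (patch2, active) -> foo_v1 (patch1)
 *
 * klp_ftrace_handler() redirects to the front of func_stack, so disabling
 * patch2 instantly re-exposes foo_v1 without touching ftrace registration.
 */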
 51
 52/*
 53 * The klp_mutex protects the global lists and state transitions of any
 54 * structure reachable from them.  References to any structure must be obtained
 55 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 56 * ensure it gets consistent data).
 57 */
 58static DEFINE_MUTEX(klp_mutex);
 59
 60static LIST_HEAD(klp_patches);
 61static LIST_HEAD(klp_ops);
 62
 63static struct kobject *klp_root_kobj;
 64
 65static struct klp_ops *klp_find_ops(unsigned long old_addr)
 66{
 67	struct klp_ops *ops;
 68	struct klp_func *func;
 69
 70	list_for_each_entry(ops, &klp_ops, node) {
 71		func = list_first_entry(&ops->func_stack, struct klp_func,
 72					stack_node);
 73		if (func->old_addr == old_addr)
 74			return ops;
 75	}
 76
 77	return NULL;
 78}
 79
 80static bool klp_is_module(struct klp_object *obj)
 81{
 82	return obj->name;
 83}
 84
 85static bool klp_is_object_loaded(struct klp_object *obj)
 86{
 87	return !obj->name || obj->mod;
 88}
 89
 90/* sets obj->mod if object is not vmlinux and module is found */
 91static void klp_find_object_module(struct klp_object *obj)
 92{
 93	struct module *mod;
 94
 95	if (!klp_is_module(obj))
 96		return;
 97
 98	mutex_lock(&module_mutex);
 99	/*
100	 * We do not want to block removal of patched modules and therefore
101	 * we do not take a reference here. The patches are removed by
102	 * klp_module_going() instead.
103	 */
104	mod = find_module(obj->name);
105	/*
106	 * Do not mess with the work of klp_module_coming() and klp_module_going().
107	 * Note that the patch might still be needed before klp_module_going()
108	 * is called. Module functions can be called even in the GOING state
109	 * until mod->exit() finishes. This is especially important for
110	 * patches that modify the semantics of the functions.
111	 */
112	if (mod && mod->klp_alive)
113		obj->mod = mod;
114
115	mutex_unlock(&module_mutex);
116}
117
118/* klp_mutex must be held by caller */
119static bool klp_is_patch_registered(struct klp_patch *patch)
120{
121	struct klp_patch *mypatch;
122
123	list_for_each_entry(mypatch, &klp_patches, list)
124		if (mypatch == patch)
125			return true;
126
127	return false;
128}
129
130static bool klp_initialized(void)
131{
132	return !!klp_root_kobj;
133}
134
135struct klp_find_arg {
136	const char *objname;
137	const char *name;
138	unsigned long addr;
139	unsigned long count;
140	unsigned long pos;
141};
142
143static int klp_find_callback(void *data, const char *name,
144			     struct module *mod, unsigned long addr)
145{
146	struct klp_find_arg *args = data;
147
148	if ((mod && !args->objname) || (!mod && args->objname))
149		return 0;
150
151	if (strcmp(args->name, name))
152		return 0;
153
154	if (args->objname && strcmp(args->objname, mod->name))
155		return 0;
156
157	args->addr = addr;
158	args->count++;
159
160	/*
161	 * Finish the search when the symbol is found for the desired position
162	 * or the position is not defined for a non-unique symbol.
163	 */
164	if ((args->pos && (args->count == args->pos)) ||
165	    (!args->pos && (args->count > 1)))
166		return 1;
167
168	return 0;
169}
170
171static int klp_find_object_symbol(const char *objname, const char *name,
172				  unsigned long sympos, unsigned long *addr)
173{
174	struct klp_find_arg args = {
175		.objname = objname,
176		.name = name,
177		.addr = 0,
178		.count = 0,
179		.pos = sympos,
180	};
181
182	mutex_lock(&module_mutex);
183	kallsyms_on_each_symbol(klp_find_callback, &args);
184	mutex_unlock(&module_mutex);
185
186	/*
187	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
188	 * otherwise ensure the symbol position count matches sympos.
189	 */
190	if (args.addr == 0)
191		pr_err("symbol '%s' not found in symbol table\n", name);
192	else if (args.count > 1 && sympos == 0) {
193		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
194		       name, objname);
195	} else if (sympos != args.count && sympos > 0) {
196		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
197		       sympos, name, objname ? objname : "vmlinux");
198	} else {
199		*addr = args.addr;
200		return 0;
201	}
202
203	*addr = 0;
204	return -EINVAL;
205}
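/*
 * [Editor's illustration -- not part of core.c]
 * Worked example of sympos: if "cleanup" occurs three times in module
 * "foo" (hypothetical names), then
 *
 *   klp_find_object_symbol("foo", "cleanup", 2, &addr)
 *
 * resolves the second occurrence, while sympos == 0 would fail with
 * -EINVAL because the symbol is not unique.
 */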
206
207/*
208 * external symbols are located outside the parent object (where the parent
209 * object is either vmlinux or the kmod being patched).
210 */
211static int klp_find_external_symbol(struct module *pmod, const char *name,
212				    unsigned long *addr)
213{
214	const struct kernel_symbol *sym;
215
216	/* first, check if it's an exported symbol */
217	preempt_disable();
218	sym = find_symbol(name, NULL, NULL, true, true);
219	if (sym) {
220		*addr = sym->value;
221		preempt_enable();
222		return 0;
223	}
224	preempt_enable();
225
226	/*
227	 * Check if it's in another .o within the patch module. This also
228	 * checks that the external symbol is unique.
229	 */
230	return klp_find_object_symbol(pmod->name, name, 0, addr);
231}
232
233static int klp_write_object_relocations(struct module *pmod,
234					struct klp_object *obj)
235{
236	int ret = 0;
237	unsigned long val;
238	struct klp_reloc *reloc;
239
240	if (WARN_ON(!klp_is_object_loaded(obj)))
241		return -EINVAL;
242
243	if (WARN_ON(!obj->relocs))
244		return -EINVAL;
245
246	module_disable_ro(pmod);
247
248	for (reloc = obj->relocs; reloc->name; reloc++) {
249		/* discover the address of the referenced symbol */
250		if (reloc->external) {
251			if (reloc->sympos > 0) {
252				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
253				       reloc->name);
254				ret = -EINVAL;
255				goto out;
256			}
257			ret = klp_find_external_symbol(pmod, reloc->name, &val);
258		} else
259			ret = klp_find_object_symbol(obj->name,
260						     reloc->name,
261						     reloc->sympos,
262						     &val);
263		if (ret)
264			goto out;
265
266		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
267					     val + reloc->addend);
268		if (ret) {
269			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
270			       reloc->name, val, ret);
271			goto out;
272		}
273	}
274
275out:
276	module_enable_ro(pmod);
277	return ret;
278}
279
280static void notrace klp_ftrace_handler(unsigned long ip,
281				       unsigned long parent_ip,
282				       struct ftrace_ops *fops,
283				       struct pt_regs *regs)
284{
285	struct klp_ops *ops;
286	struct klp_func *func;
287
288	ops = container_of(fops, struct klp_ops, fops);
289
290	rcu_read_lock();
291	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
292				      stack_node);
293	if (WARN_ON_ONCE(!func))
294		goto unlock;
295
296	klp_arch_set_pc(regs, (unsigned long)func->new_func);
297unlock:
298	rcu_read_unlock();
299}
300
301static void klp_disable_func(struct klp_func *func)
302{
303	struct klp_ops *ops;
304
305	if (WARN_ON(func->state != KLP_ENABLED))
306		return;
307	if (WARN_ON(!func->old_addr))
308		return;
309
310	ops = klp_find_ops(func->old_addr);
311	if (WARN_ON(!ops))
312		return;
313
314	if (list_is_singular(&ops->func_stack)) {
315		WARN_ON(unregister_ftrace_function(&ops->fops));
316		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
317
318		list_del_rcu(&func->stack_node);
319		list_del(&ops->node);
320		kfree(ops);
321	} else {
322		list_del_rcu(&func->stack_node);
323	}
324
325	func->state = KLP_DISABLED;
326}
327
328static int klp_enable_func(struct klp_func *func)
329{
330	struct klp_ops *ops;
331	int ret;
332
333	if (WARN_ON(!func->old_addr))
334		return -EINVAL;
335
336	if (WARN_ON(func->state != KLP_DISABLED))
337		return -EINVAL;
338
339	ops = klp_find_ops(func->old_addr);
340	if (!ops) {
341		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
342		if (!ops)
343			return -ENOMEM;
344
345		ops->fops.func = klp_ftrace_handler;
346		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
347				  FTRACE_OPS_FL_DYNAMIC |
348				  FTRACE_OPS_FL_IPMODIFY;
349
350		list_add(&ops->node, &klp_ops);
351
352		INIT_LIST_HEAD(&ops->func_stack);
353		list_add_rcu(&func->stack_node, &ops->func_stack);
354
355		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
356		if (ret) {
357			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
358			       func->old_name, ret);
359			goto err;
360		}
361
362		ret = register_ftrace_function(&ops->fops);
363		if (ret) {
364			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
365			       func->old_name, ret);
366			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
367			goto err;
368		}
369
370
371	} else {
372		list_add_rcu(&func->stack_node, &ops->func_stack);
373	}
374
375	func->state = KLP_ENABLED;
376
377	return 0;
378
379err:
380	list_del_rcu(&func->stack_node);
381	list_del(&ops->node);
382	kfree(ops);
383	return ret;
384}
385
386static void klp_disable_object(struct klp_object *obj)
387{
388	struct klp_func *func;
389
390	klp_for_each_func(obj, func)
391		if (func->state == KLP_ENABLED)
392			klp_disable_func(func);
393
394	obj->state = KLP_DISABLED;
395}
396
397static int klp_enable_object(struct klp_object *obj)
398{
399	struct klp_func *func;
400	int ret;
401
402	if (WARN_ON(obj->state != KLP_DISABLED))
403		return -EINVAL;
404
405	if (WARN_ON(!klp_is_object_loaded(obj)))
406		return -EINVAL;
407
408	klp_for_each_func(obj, func) {
409		ret = klp_enable_func(func);
410		if (ret) {
411			klp_disable_object(obj);
412			return ret;
413		}
414	}
415	obj->state = KLP_ENABLED;
416
417	return 0;
418}
419
420static int __klp_disable_patch(struct klp_patch *patch)
421{
422	struct klp_object *obj;
423
424	/* enforce stacking: only the last enabled patch can be disabled */
425	if (!list_is_last(&patch->list, &klp_patches) &&
426	    list_next_entry(patch, list)->state == KLP_ENABLED)
427		return -EBUSY;
428
429	pr_notice("disabling patch '%s'\n", patch->mod->name);
430
431	klp_for_each_object(patch, obj) {
432		if (obj->state == KLP_ENABLED)
433			klp_disable_object(obj);
434	}
435
436	patch->state = KLP_DISABLED;
437
438	return 0;
439}
440
441/**
442 * klp_disable_patch() - disables a registered patch
443 * @patch:	The registered, enabled patch to be disabled
444 *
445 * Unregisters the patched functions from ftrace.
446 *
447 * Return: 0 on success, otherwise error
448 */
449int klp_disable_patch(struct klp_patch *patch)
450{
451	int ret;
452
453	mutex_lock(&klp_mutex);
454
455	if (!klp_is_patch_registered(patch)) {
456		ret = -EINVAL;
457		goto err;
458	}
459
460	if (patch->state == KLP_DISABLED) {
461		ret = -EINVAL;
462		goto err;
463	}
464
465	ret = __klp_disable_patch(patch);
466
467err:
468	mutex_unlock(&klp_mutex);
469	return ret;
470}
471EXPORT_SYMBOL_GPL(klp_disable_patch);
472
473static int __klp_enable_patch(struct klp_patch *patch)
474{
475	struct klp_object *obj;
476	int ret;
477
478	if (WARN_ON(patch->state != KLP_DISABLED))
479		return -EINVAL;
480
481	/* enforce stacking: only the first disabled patch can be enabled */
482	if (patch->list.prev != &klp_patches &&
483	    list_prev_entry(patch, list)->state == KLP_DISABLED)
484		return -EBUSY;
485
486	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
487	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
488
489	pr_notice("enabling patch '%s'\n", patch->mod->name);
490
491	klp_for_each_object(patch, obj) {
492		if (!klp_is_object_loaded(obj))
493			continue;
494
495		ret = klp_enable_object(obj);
496		if (ret)
497			goto unregister;
498	}
499
500	patch->state = KLP_ENABLED;
501
502	return 0;
503
504unregister:
505	WARN_ON(__klp_disable_patch(patch));
506	return ret;
507}
508
509/**
510 * klp_enable_patch() - enables a registered patch
511 * @patch:	The registered, disabled patch to be enabled
512 *
513 * Performs the needed symbol lookups and code relocations,
514 * then registers the patched functions with ftrace.
515 *
516 * Return: 0 on success, otherwise error
517 */
518int klp_enable_patch(struct klp_patch *patch)
519{
520	int ret;
521
522	mutex_lock(&klp_mutex);
523
524	if (!klp_is_patch_registered(patch)) {
525		ret = -EINVAL;
526		goto err;
527	}
528
529	ret = __klp_enable_patch(patch);
530
531err:
532	mutex_unlock(&klp_mutex);
533	return ret;
534}
535EXPORT_SYMBOL_GPL(klp_enable_patch);
536
537/*
538 * Sysfs Interface
539 *
540 * /sys/kernel/livepatch
541 * /sys/kernel/livepatch/<patch>
542 * /sys/kernel/livepatch/<patch>/enabled
543 * /sys/kernel/livepatch/<patch>/<object>
544 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
545 */
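/*
 * [Editor's illustration -- not part of core.c]
 * Typical userspace interaction, assuming a loaded patch module named
 * "livepatch_sample" (hypothetical name):
 *
 *   cat /sys/kernel/livepatch/livepatch_sample/enabled       # 1 or 0
 *   echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled  # disable
 *   echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled  # re-enable
 */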
546
547static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
548			     const char *buf, size_t count)
549{
550	struct klp_patch *patch;
551	int ret;
552	unsigned long val;
553
554	ret = kstrtoul(buf, 10, &val);
555	if (ret)
556		return -EINVAL;
557
558	if (val != KLP_DISABLED && val != KLP_ENABLED)
559		return -EINVAL;
560
561	patch = container_of(kobj, struct klp_patch, kobj);
562
563	mutex_lock(&klp_mutex);
564
565	if (val == patch->state) {
566		/* already in requested state */
567		ret = -EINVAL;
568		goto err;
569	}
570
571	if (val == KLP_ENABLED) {
572		ret = __klp_enable_patch(patch);
573		if (ret)
574			goto err;
575	} else {
576		ret = __klp_disable_patch(patch);
577		if (ret)
578			goto err;
579	}
580
581	mutex_unlock(&klp_mutex);
582
583	return count;
584
585err:
586	mutex_unlock(&klp_mutex);
587	return ret;
588}
589
590static ssize_t enabled_show(struct kobject *kobj,
591			    struct kobj_attribute *attr, char *buf)
592{
593	struct klp_patch *patch;
594
595	patch = container_of(kobj, struct klp_patch, kobj);
596	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
597}
598
599static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
600static struct attribute *klp_patch_attrs[] = {
601	&enabled_kobj_attr.attr,
602	NULL
603};
604
605static void klp_kobj_release_patch(struct kobject *kobj)
606{
607	/*
608	 * Once we have a consistency model we'll need to module_put() the
609	 * patch module here.  See klp_register_patch() for more details.
610	 */
611}
612
613static struct kobj_type klp_ktype_patch = {
614	.release = klp_kobj_release_patch,
615	.sysfs_ops = &kobj_sysfs_ops,
616	.default_attrs = klp_patch_attrs,
617};
618
619static void klp_kobj_release_object(struct kobject *kobj)
620{
621}
622
623static struct kobj_type klp_ktype_object = {
624	.release = klp_kobj_release_object,
625	.sysfs_ops = &kobj_sysfs_ops,
626};
627
628static void klp_kobj_release_func(struct kobject *kobj)
629{
630}
631
632static struct kobj_type klp_ktype_func = {
633	.release = klp_kobj_release_func,
634	.sysfs_ops = &kobj_sysfs_ops,
635};
636
637/*
638 * Free all functions' kobjects in the array up to some limit. When limit is
639 * NULL, all kobjects are freed.
640 */
641static void klp_free_funcs_limited(struct klp_object *obj,
642				   struct klp_func *limit)
643{
644	struct klp_func *func;
645
646	for (func = obj->funcs; func->old_name && func != limit; func++)
647		kobject_put(&func->kobj);
648}
649
650/* Clean up when a patched object is unloaded */
651static void klp_free_object_loaded(struct klp_object *obj)
652{
653	struct klp_func *func;
654
655	obj->mod = NULL;
656
657	klp_for_each_func(obj, func)
658		func->old_addr = 0;
659}
660
661/*
662 * Free all objects' kobjects in the array up to some limit. When limit is
663 * NULL, all kobjects are freed.
664 */
665static void klp_free_objects_limited(struct klp_patch *patch,
666				     struct klp_object *limit)
667{
668	struct klp_object *obj;
669
670	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
671		klp_free_funcs_limited(obj, NULL);
672		kobject_put(&obj->kobj);
673	}
674}
675
676static void klp_free_patch(struct klp_patch *patch)
677{
678	klp_free_objects_limited(patch, NULL);
679	if (!list_empty(&patch->list))
680		list_del(&patch->list);
681	kobject_put(&patch->kobj);
682}
683
684static int klp_init_func(struct klp_object *obj, struct klp_func *func)
685{
686	INIT_LIST_HEAD(&func->stack_node);
687	func->state = KLP_DISABLED;
688
689	/* The format for the sysfs directory is <function,sympos> where sympos
690	 * is the nth occurrence of this symbol in kallsyms for the patched
691	 * object. If the user selects 0 for old_sympos, then 1 will be used
692	 * since a unique symbol will be the first occurrence.
693	 */
694	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
695				    &obj->kobj, "%s,%lu", func->old_name,
696				    func->old_sympos ? func->old_sympos : 1);
697}
698
699/* parts of the initialization that are done only when the object is loaded */
700static int klp_init_object_loaded(struct klp_patch *patch,
701				  struct klp_object *obj)
702{
703	struct klp_func *func;
704	int ret;
705
706	if (obj->relocs) {
707		ret = klp_write_object_relocations(patch->mod, obj);
708		if (ret)
709			return ret;
710	}
711
712	klp_for_each_func(obj, func) {
713		ret = klp_find_object_symbol(obj->name, func->old_name,
714					     func->old_sympos,
715					     &func->old_addr);
716		if (ret)
717			return ret;
718	}
719
720	return 0;
721}
722
723static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
724{
725	struct klp_func *func;
726	int ret;
727	const char *name;
728
729	if (!obj->funcs)
730		return -EINVAL;
731
732	obj->state = KLP_DISABLED;
733	obj->mod = NULL;
734
735	klp_find_object_module(obj);
736
737	name = klp_is_module(obj) ? obj->name : "vmlinux";
738	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
739				   &patch->kobj, "%s", name);
740	if (ret)
741		return ret;
742
743	klp_for_each_func(obj, func) {
744		ret = klp_init_func(obj, func);
745		if (ret)
746			goto free;
747	}
748
749	if (klp_is_object_loaded(obj)) {
750		ret = klp_init_object_loaded(patch, obj);
751		if (ret)
752			goto free;
753	}
754
755	return 0;
756
757free:
758	klp_free_funcs_limited(obj, func);
759	kobject_put(&obj->kobj);
760	return ret;
761}
762
763static int klp_init_patch(struct klp_patch *patch)
764{
765	struct klp_object *obj;
766	int ret;
767
768	if (!patch->objs)
769		return -EINVAL;
770
771	mutex_lock(&klp_mutex);
772
773	patch->state = KLP_DISABLED;
774
775	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
776				   klp_root_kobj, "%s", patch->mod->name);
777	if (ret)
778		goto unlock;
779
780	klp_for_each_object(patch, obj) {
781		ret = klp_init_object(patch, obj);
782		if (ret)
783			goto free;
784	}
785
786	list_add_tail(&patch->list, &klp_patches);
787
788	mutex_unlock(&klp_mutex);
789
790	return 0;
791
792free:
793	klp_free_objects_limited(patch, obj);
794	kobject_put(&patch->kobj);
795unlock:
796	mutex_unlock(&klp_mutex);
797	return ret;
798}
799
800/**
801 * klp_unregister_patch() - unregisters a patch
802 * @patch:	Disabled patch to be unregistered
803 *
804 * Frees the data structures and removes the sysfs interface.
805 *
806 * Return: 0 on success, otherwise error
807 */
808int klp_unregister_patch(struct klp_patch *patch)
809{
810	int ret = 0;
811
812	mutex_lock(&klp_mutex);
813
814	if (!klp_is_patch_registered(patch)) {
815		ret = -EINVAL;
816		goto out;
817	}
818
819	if (patch->state == KLP_ENABLED) {
820		ret = -EBUSY;
821		goto out;
822	}
823
824	klp_free_patch(patch);
825
826out:
827	mutex_unlock(&klp_mutex);
828	return ret;
829}
830EXPORT_SYMBOL_GPL(klp_unregister_patch);
831
832/**
833 * klp_register_patch() - registers a patch
834 * @patch:	Patch to be registered
835 *
836 * Initializes the data structure associated with the patch and
837 * creates the sysfs interface.
838 *
839 * Return: 0 on success, otherwise error
840 */
841int klp_register_patch(struct klp_patch *patch)
842{
843	int ret;
844
845	if (!klp_initialized())
846		return -ENODEV;
847
848	if (!patch || !patch->mod)
849		return -EINVAL;
850
851	/*
852	 * A reference is taken on the patch module to prevent it from being
853	 * unloaded.  Right now, we don't allow patch modules to unload since
854	 * there is currently no method to determine if a thread is still
855	 * running in the patched code contained in the patch module once
856	 * the ftrace registration is successful.
857	 */
858	if (!try_module_get(patch->mod))
859		return -ENODEV;
860
861	ret = klp_init_patch(patch);
862	if (ret)
863		module_put(patch->mod);
864
865	return ret;
866}
867EXPORT_SYMBOL_GPL(klp_register_patch);
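/*
 * [Editor's illustration -- not part of core.c]
 * A minimal consumer of the v4.6 API above, closely modeled on
 * samples/livepatch/livepatch-sample.c; the patched symbol and module
 * names are illustrative. The patch must be disabled via sysfs before
 * the module can be unregistered and unloaded.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* replacement for vmlinux's cmdline_proc_show() */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	/* register first (builds the sysfs entries), then enable */
	ret = klp_register_patch(&patch);
	if (ret)
		return ret;
	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}
	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");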
868
869int klp_module_coming(struct module *mod)
870{
871	int ret;
872	struct klp_patch *patch;
873	struct klp_object *obj;
874
875	if (WARN_ON(mod->state != MODULE_STATE_COMING))
876		return -EINVAL;
877
878	mutex_lock(&klp_mutex);
879	/*
880	 * Each module has to know that klp_module_coming()
881	 * has been called. We never know what module will
882	 * get patched by a new patch.
883	 */
884	mod->klp_alive = true;
885
886	list_for_each_entry(patch, &klp_patches, list) {
887		klp_for_each_object(patch, obj) {
888			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
889				continue;
890
891			obj->mod = mod;
892
893			ret = klp_init_object_loaded(patch, obj);
894			if (ret) {
895				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
896					patch->mod->name, obj->mod->name, ret);
897				goto err;
898			}
899
900			if (patch->state == KLP_DISABLED)
901				break;
902
903			pr_notice("applying patch '%s' to loading module '%s'\n",
904				  patch->mod->name, obj->mod->name);
905
906			ret = klp_enable_object(obj);
907			if (ret) {
908				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
909					patch->mod->name, obj->mod->name, ret);
910				goto err;
911			}
912
913			break;
914		}
915	}
916
917	mutex_unlock(&klp_mutex);
918
919	return 0;
920
921err:
922	/*
923	 * If a patch is unsuccessfully applied, return
924	 * error to the module loader.
925	 */
926	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
927		patch->mod->name, obj->mod->name, obj->mod->name);
928	mod->klp_alive = false;
929	klp_free_object_loaded(obj);
930	mutex_unlock(&klp_mutex);
931
932	return ret;
933}
934
935void klp_module_going(struct module *mod)
936{
937	struct klp_patch *patch;
938	struct klp_object *obj;
939
940	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
941		    mod->state != MODULE_STATE_COMING))
942		return;
943
944	mutex_lock(&klp_mutex);
945	/*
946	 * Each module has to know that klp_module_going()
947	 * has been called. We never know what module will
948	 * get patched by a new patch.
949	 */
950	mod->klp_alive = false;
951
952	list_for_each_entry(patch, &klp_patches, list) {
953		klp_for_each_object(patch, obj) {
954			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
955				continue;
956
957			if (patch->state != KLP_DISABLED) {
958				pr_notice("reverting patch '%s' on unloading module '%s'\n",
959					  patch->mod->name, obj->mod->name);
960				klp_disable_object(obj);
961			}
962
963			klp_free_object_loaded(obj);
964			break;
965		}
966	}
967
968	mutex_unlock(&klp_mutex);
969}
970
971static int __init klp_init(void)
972{
973	int ret;
974
975	ret = klp_check_compiler_support();
976	if (ret) {
977		pr_info("Your compiler is too old; turning off.\n");
978		return -EINVAL;
979	}
980
981	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
982	if (!klp_root_kobj)
983		return -ENOMEM;
984
985	return 0;
986}
987
988module_init(klp_init);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * core.c - Kernel Live Patching Core
   4 *
   5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   6 * Copyright (C) 2014 SUSE
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/mutex.h>
  14#include <linux/slab.h>
  15#include <linux/list.h>
  16#include <linux/kallsyms.h>
  17#include <linux/livepatch.h>
  18#include <linux/elf.h>
  19#include <linux/moduleloader.h>
  20#include <linux/completion.h>
  21#include <linux/memory.h>
  22#include <asm/cacheflush.h>
  23#include "core.h"
  24#include "patch.h"
  25#include "state.h"
  26#include "transition.h"
  27
  28/*
  29 * klp_mutex is a coarse lock which serializes access to klp data.  All
  30 * accesses to klp-related variables and structures must have mutex protection,
  31 * except within the following functions which carefully avoid the need for it:
  32 *
  33 * - klp_ftrace_handler()
  34 * - klp_update_patch_state()
  35 */
  36DEFINE_MUTEX(klp_mutex);
  37
  38/*
  39 * Actively used patches: enabled or in transition. Note that replaced
  40 * or disabled patches are not listed even though the related kernel
  41 * module still can be loaded.
  42 */
  43LIST_HEAD(klp_patches);
  44
  45static struct kobject *klp_root_kobj;
  46
  47static bool klp_is_module(struct klp_object *obj)
  48{
  49	return obj->name;
  50}
  51
  52/* sets obj->mod if object is not vmlinux and module is found */
  53static void klp_find_object_module(struct klp_object *obj)
  54{
  55	struct module *mod;
  56
  57	if (!klp_is_module(obj))
  58		return;
  59
  60	mutex_lock(&module_mutex);
  61	/*
  62	 * We do not want to block removal of patched modules and therefore
  63	 * we do not take a reference here. The patches are removed by
  64	 * klp_module_going() instead.
  65	 */
  66	mod = find_module(obj->name);
  67	/*
  68	 * Do not mess with the work of klp_module_coming() and klp_module_going().
  69	 * Note that the patch might still be needed before klp_module_going()
  70	 * is called. Module functions can be called even in the GOING state
  71	 * until mod->exit() finishes. This is especially important for
  72	 * patches that modify the semantics of the functions.
  73	 */
  74	if (mod && mod->klp_alive)
  75		obj->mod = mod;
  76
  77	mutex_unlock(&module_mutex);
  78}
  79
  80static bool klp_initialized(void)
  81{
  82	return !!klp_root_kobj;
  83}
  84
  85static struct klp_func *klp_find_func(struct klp_object *obj,
  86				      struct klp_func *old_func)
  87{
  88	struct klp_func *func;
  89
  90	klp_for_each_func(obj, func) {
  91		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
  92		    (old_func->old_sympos == func->old_sympos)) {
  93			return func;
  94		}
  95	}
  96
  97	return NULL;
  98}
  99
 100static struct klp_object *klp_find_object(struct klp_patch *patch,
 101					  struct klp_object *old_obj)
 102{
 103	struct klp_object *obj;
 104
 105	klp_for_each_object(patch, obj) {
 106		if (klp_is_module(old_obj)) {
 107			if (klp_is_module(obj) &&
 108			    strcmp(old_obj->name, obj->name) == 0) {
 109				return obj;
 110			}
 111		} else if (!klp_is_module(obj)) {
 112			return obj;
 113		}
 114	}
 115
 116	return NULL;
 117}
 118
 119struct klp_find_arg {
 120	const char *objname;
 121	const char *name;
 122	unsigned long addr;
 123	unsigned long count;
 124	unsigned long pos;
 125};
 126
 127static int klp_find_callback(void *data, const char *name,
 128			     struct module *mod, unsigned long addr)
 129{
 130	struct klp_find_arg *args = data;
 131
 132	if ((mod && !args->objname) || (!mod && args->objname))
 133		return 0;
 134
 135	if (strcmp(args->name, name))
 136		return 0;
 137
 138	if (args->objname && strcmp(args->objname, mod->name))
 139		return 0;
 140
 141	args->addr = addr;
 142	args->count++;
 143
 144	/*
 145	 * Finish the search when the symbol is found for the desired position
 146	 * or the position is not defined for a non-unique symbol.
 147	 */
 148	if ((args->pos && (args->count == args->pos)) ||
 149	    (!args->pos && (args->count > 1)))
 150		return 1;
 151
 152	return 0;
 153}
 154
 155static int klp_find_object_symbol(const char *objname, const char *name,
 156				  unsigned long sympos, unsigned long *addr)
 157{
 158	struct klp_find_arg args = {
 159		.objname = objname,
 160		.name = name,
 161		.addr = 0,
 162		.count = 0,
 163		.pos = sympos,
 164	};
 165
 166	mutex_lock(&module_mutex);
 167	if (objname)
 168		module_kallsyms_on_each_symbol(klp_find_callback, &args);
 169	else
 170		kallsyms_on_each_symbol(klp_find_callback, &args);
 171	mutex_unlock(&module_mutex);
 172
 173	/*
 174	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
 175	 * otherwise ensure the symbol position count matches sympos.
 176	 */
 177	if (args.addr == 0)
 178		pr_err("symbol '%s' not found in symbol table\n", name);
 179	else if (args.count > 1 && sympos == 0) {
 180		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
 181		       name, objname);
 182	} else if (sympos != args.count && sympos > 0) {
 183		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
 184		       sympos, name, objname ? objname : "vmlinux");
 185	} else {
 186		*addr = args.addr;
 187		return 0;
 188	}
 189
 190	*addr = 0;
 191	return -EINVAL;
 192}
 193
 194static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
 195			       unsigned int symndx, Elf_Shdr *relasec,
 196			       const char *sec_objname)
 197{
 198	int i, cnt, ret;
 199	char sym_objname[MODULE_NAME_LEN];
 200	char sym_name[KSYM_NAME_LEN];
 201	Elf_Rela *relas;
 202	Elf_Sym *sym;
 203	unsigned long sympos, addr;
 204	bool sym_vmlinux;
 205	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
 206
 207	/*
 208	 * Since the field widths for sym_objname and sym_name in the sscanf()
 209	 * call are hard-coded and correspond to MODULE_NAME_LEN and
 210	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
 211	 * and KSYM_NAME_LEN have the values we expect them to have.
 212	 *
 213	 * Because the value of MODULE_NAME_LEN can differ among architectures,
 214	 * we use the smallest/strictest upper bound possible (56, based on
 215	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 216	 */
 217	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
 218
 219	relas = (Elf_Rela *) relasec->sh_addr;
 220	/* For each rela in this klp relocation section */
 221	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
 222		sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
 223		if (sym->st_shndx != SHN_LIVEPATCH) {
 224			pr_err("symbol %s is not marked as a livepatch symbol\n",
 225			       strtab + sym->st_name);
 226			return -EINVAL;
 227		}
 228
 229		/* Format: .klp.sym.sym_objname.sym_name,sympos */
 230		cnt = sscanf(strtab + sym->st_name,
 231			     ".klp.sym.%55[^.].%127[^,],%lu",
 232			     sym_objname, sym_name, &sympos);
 233		if (cnt != 3) {
 234			pr_err("symbol %s has an incorrectly formatted name\n",
 235			       strtab + sym->st_name);
 236			return -EINVAL;
 237		}
 238
 239		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
 240
 241		/*
 242		 * Prevent module-specific KLP rela sections from referencing
 243		 * vmlinux symbols.  This helps prevent ordering issues with
 244		 * module special section initializations.  Presumably such
 245		 * symbols are exported and normal relas can be used instead.
 246		 */
 247		if (!sec_vmlinux && sym_vmlinux) {
 248			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
 249			       sym_name);
 250			return -EINVAL;
 251		}
 252
 253		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
 254		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
 255					     sym_name, sympos, &addr);
 256		if (ret)
 257			return ret;
 258
 259		sym->st_value = addr;
 260	}
 261
 262	return 0;
 263}
 264
 265/*
 266 * At a high-level, there are two types of klp relocation sections: those which
 267 * reference symbols which live in vmlinux; and those which reference symbols
 268 * which live in other modules.  This function is called for both types:
 269 *
 270 * 1) When a klp module itself loads, the module code calls this function to
 271 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 272 *    These relocations are written to the klp module text to allow the patched
 273 *    code/data to reference unexported vmlinux symbols.  They're written as
 274 *    early as possible to ensure that other module init code (e.g.,
 275 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 276 *    might be referenced by the klp module's special sections.
 277 *
 278 * 2) When a to-be-patched module loads -- or is already loaded when a
 279 *    corresponding klp module loads -- klp code calls this function to write
 280 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 281 *    are written to the klp module text to allow the patched code/data to
 282 *    reference symbols which live in the to-be-patched module or one of its
 283 *    module dependencies.  Exported symbols are supported, in addition to
 284 *    unexported symbols, in order to enable late module patching, which allows
 285 *    the to-be-patched module to be loaded and patched sometime *after* the
 286 *    klp module is loaded.
 287 */
 288int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
 289			     const char *shstrtab, const char *strtab,
 290			     unsigned int symndx, unsigned int secndx,
 291			     const char *objname)
 292{
 293	int cnt, ret;
 294	char sec_objname[MODULE_NAME_LEN];
 295	Elf_Shdr *sec = sechdrs + secndx;
 296
 297	/*
 298	 * Format: .klp.rela.sec_objname.section_name
 299	 * See comment in klp_resolve_symbols() for an explanation
 300	 * of the selected field width value.
 301	 */
 302	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
 303		     sec_objname);
 304	if (cnt != 1) {
 305		pr_err("section %s has an incorrectly formatted name\n",
 306		       shstrtab + sec->sh_name);
 307		return -EINVAL;
 308	}
 309
 310	if (strcmp(objname ? objname : "vmlinux", sec_objname))
 311		return 0;
 312
 313	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
 314	if (ret)
 315		return ret;
 316
 317	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
 318}
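/*
 * [Editor's illustration -- not part of core.c]
 * Example of the special ELF names handled above, using the formats
 * given in klp_resolve_symbols() and klp_apply_section_relocs(); the
 * concrete object/symbol names here are hypothetical:
 *
 *   relocation section:  .klp.rela.ext4.text.ext4_attr_show
 *   livepatch symbol:    .klp.sym.vmlinux.saved_command_line,0
 *
 * The trailing ",0" is the sympos; 0 requires the symbol to be unique.
 */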
 319
 320/*
 321 * Sysfs Interface
 322 *
 323 * /sys/kernel/livepatch
 324 * /sys/kernel/livepatch/<patch>
 325 * /sys/kernel/livepatch/<patch>/enabled
 326 * /sys/kernel/livepatch/<patch>/transition
 327 * /sys/kernel/livepatch/<patch>/force
 328 * /sys/kernel/livepatch/<patch>/<object>
 329 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 330 */
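/*
 * [Editor's illustration -- not part of core.c]
 * Example userspace interaction, assuming a patch module named
 * "livepatch_sample" (hypothetical name):
 *
 *   echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled  # start unpatching
 *   cat /sys/kernel/livepatch/livepatch_sample/transition    # 1 while in transition
 *   echo 1 > /sys/kernel/livepatch/livepatch_sample/force    # force a stuck transition
 */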
 331static int __klp_disable_patch(struct klp_patch *patch);
 332
 333static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 334			     const char *buf, size_t count)
 335{
 336	struct klp_patch *patch;
 337	int ret;
 338	bool enabled;
 339
 340	ret = kstrtobool(buf, &enabled);
 341	if (ret)
 342		return ret;
 343
 344	patch = container_of(kobj, struct klp_patch, kobj);
 345
 346	mutex_lock(&klp_mutex);
 347
 348	if (patch->enabled == enabled) {
 349		/* already in requested state */
 350		ret = -EINVAL;
 351		goto out;
 352	}
 353
 354	/*
 355	 * Allow reversing a pending transition in either direction. It might be
 356	 * necessary to complete the transition without forcing and breaking
 357	 * the system integrity.
 358	 *
 359	 * Do not allow re-enabling a disabled patch.
 360	 */
 361	if (patch == klp_transition_patch)
 362		klp_reverse_transition();
 363	else if (!enabled)
 364		ret = __klp_disable_patch(patch);
 365	else
 366		ret = -EINVAL;
 367
 368out:
 369	mutex_unlock(&klp_mutex);
 370
 371	if (ret)
 372		return ret;
 373	return count;
 374}
 375
 376static ssize_t enabled_show(struct kobject *kobj,
 377			    struct kobj_attribute *attr, char *buf)
 378{
 379	struct klp_patch *patch;
 380
 381	patch = container_of(kobj, struct klp_patch, kobj);
 382	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
 383}
 384
 385static ssize_t transition_show(struct kobject *kobj,
 386			       struct kobj_attribute *attr, char *buf)
 387{
 388	struct klp_patch *patch;
 389
 390	patch = container_of(kobj, struct klp_patch, kobj);
 391	return snprintf(buf, PAGE_SIZE-1, "%d\n",
 392			patch == klp_transition_patch);
 393}
 394
 395static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
 396			   const char *buf, size_t count)
 397{
 398	struct klp_patch *patch;
 399	int ret;
 400	bool val;
 401
 402	ret = kstrtobool(buf, &val);
 403	if (ret)
 404		return ret;
 405
 406	if (!val)
 407		return count;
 408
 409	mutex_lock(&klp_mutex);
 410
 411	patch = container_of(kobj, struct klp_patch, kobj);
 412	if (patch != klp_transition_patch) {
 413		mutex_unlock(&klp_mutex);
 414		return -EINVAL;
 415	}
 416
 417	klp_force_transition();
 418
 419	mutex_unlock(&klp_mutex);
 420
 421	return count;
 422}
 423
 424static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 425static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 426static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 427static struct attribute *klp_patch_attrs[] = {
 428	&enabled_kobj_attr.attr,
 429	&transition_kobj_attr.attr,
 430	&force_kobj_attr.attr,
 431	NULL
 432};
 433ATTRIBUTE_GROUPS(klp_patch);
 434
 435static void klp_free_object_dynamic(struct klp_object *obj)
 436{
 437	kfree(obj->name);
 438	kfree(obj);
 439}
 440
 441static void klp_init_func_early(struct klp_object *obj,
 442				struct klp_func *func);
 443static void klp_init_object_early(struct klp_patch *patch,
 444				  struct klp_object *obj);
 445
 446static struct klp_object *klp_alloc_object_dynamic(const char *name,
 447						   struct klp_patch *patch)
 448{
 449	struct klp_object *obj;
 450
 451	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 452	if (!obj)
 453		return NULL;
 454
 455	if (name) {
 456		obj->name = kstrdup(name, GFP_KERNEL);
 457		if (!obj->name) {
 458			kfree(obj);
 459			return NULL;
 460		}
 461	}
 462
 463	klp_init_object_early(patch, obj);
 464	obj->dynamic = true;
 465
 466	return obj;
 467}
 468
 469static void klp_free_func_nop(struct klp_func *func)
 470{
 471	kfree(func->old_name);
 472	kfree(func);
 473}
 474
 475static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
 476					   struct klp_object *obj)
 477{
 478	struct klp_func *func;
 479
 480	func = kzalloc(sizeof(*func), GFP_KERNEL);
 481	if (!func)
 482		return NULL;
 483
 484	if (old_func->old_name) {
 485		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
 486		if (!func->old_name) {
 487			kfree(func);
 488			return NULL;
 489		}
 490	}
 491
 492	klp_init_func_early(obj, func);
 493	/*
 494	 * func->new_func is same as func->old_func. These addresses are
 495	 * set when the object is loaded, see klp_init_object_loaded().
 496	 */
 497	func->old_sympos = old_func->old_sympos;
 498	func->nop = true;
 499
 500	return func;
 501}
 502
 503static int klp_add_object_nops(struct klp_patch *patch,
 504			       struct klp_object *old_obj)
 505{
 506	struct klp_object *obj;
 507	struct klp_func *func, *old_func;
 508
 509	obj = klp_find_object(patch, old_obj);
 510
 511	if (!obj) {
 512		obj = klp_alloc_object_dynamic(old_obj->name, patch);
 513		if (!obj)
 514			return -ENOMEM;
 515	}
 516
 517	klp_for_each_func(old_obj, old_func) {
 518		func = klp_find_func(obj, old_func);
 519		if (func)
 520			continue;
 521
 522		func = klp_alloc_func_nop(old_func, obj);
 523		if (!func)
 524			return -ENOMEM;
 525	}
 526
 527	return 0;
 528}
 529
 530/*
 531 * Add 'nop' functions which simply return to the caller to run
 532 * the original function. The 'nop' functions are added to a
 533 * patch to facilitate a 'replace' mode.
 534 */
 535static int klp_add_nops(struct klp_patch *patch)
 536{
 537	struct klp_patch *old_patch;
 538	struct klp_object *old_obj;
 539
 540	klp_for_each_patch(old_patch) {
 541		klp_for_each_object(old_patch, old_obj) {
 542			int err;
 543
 544			err = klp_add_object_nops(patch, old_obj);
 545			if (err)
 546				return err;
 547		}
 548	}
 549
 550	return 0;
 551}
 552
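/*
 * [Editor's illustration -- not part of core.c]
 * A cumulative patch opts into this 'replace' mode by setting the
 * .replace flag in its klp_patch definition (checked by klp_init_patch()
 * below); the nop functions are then generated automatically for
 * everything the older patches touched:
 *
 *   static struct klp_patch patch = {
 *           .mod = THIS_MODULE,
 *           .objs = objs,
 *           .replace = true,
 *   };
 */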
 553static void klp_kobj_release_patch(struct kobject *kobj)
 554{
 555	struct klp_patch *patch;
 556
 557	patch = container_of(kobj, struct klp_patch, kobj);
 558	complete(&patch->finish);
 559}
 560
 561static struct kobj_type klp_ktype_patch = {
 562	.release = klp_kobj_release_patch,
 563	.sysfs_ops = &kobj_sysfs_ops,
 564	.default_groups = klp_patch_groups,
 565};
 566
 567static void klp_kobj_release_object(struct kobject *kobj)
 568{
 569	struct klp_object *obj;
 570
 571	obj = container_of(kobj, struct klp_object, kobj);
 572
 573	if (obj->dynamic)
 574		klp_free_object_dynamic(obj);
 575}
 576
 577static struct kobj_type klp_ktype_object = {
 578	.release = klp_kobj_release_object,
 579	.sysfs_ops = &kobj_sysfs_ops,
 580};
 581
 582static void klp_kobj_release_func(struct kobject *kobj)
 583{
 584	struct klp_func *func;
 585
 586	func = container_of(kobj, struct klp_func, kobj);
 587
 588	if (func->nop)
 589		klp_free_func_nop(func);
 590}
 591
 592static struct kobj_type klp_ktype_func = {
 593	.release = klp_kobj_release_func,
 594	.sysfs_ops = &kobj_sysfs_ops,
 595};
 596
 597static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
 598{
 599	struct klp_func *func, *tmp_func;
 600
 601	klp_for_each_func_safe(obj, func, tmp_func) {
 602		if (nops_only && !func->nop)
 603			continue;
 604
 605		list_del(&func->node);
 606		kobject_put(&func->kobj);
 607	}
 608}
 609
 610/* Clean up when a patched object is unloaded */
 611static void klp_free_object_loaded(struct klp_object *obj)
 612{
 613	struct klp_func *func;
 614
 615	obj->mod = NULL;
 616
 617	klp_for_each_func(obj, func) {
 618		func->old_func = NULL;
 619
 620		if (func->nop)
 621			func->new_func = NULL;
 622	}
 623}
 624
 625static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
 626{
 627	struct klp_object *obj, *tmp_obj;
 628
 629	klp_for_each_object_safe(patch, obj, tmp_obj) {
 630		__klp_free_funcs(obj, nops_only);
 631
 632		if (nops_only && !obj->dynamic)
 633			continue;
 634
 635		list_del(&obj->node);
 636		kobject_put(&obj->kobj);
 637	}
 638}
 639
 640static void klp_free_objects(struct klp_patch *patch)
 641{
 642	__klp_free_objects(patch, false);
 643}
 644
 645static void klp_free_objects_dynamic(struct klp_patch *patch)
 646{
 647	__klp_free_objects(patch, true);
 648}
 649
 650/*
 651 * This function implements the free operations that can be called safely
 652 * under klp_mutex.
 653 *
 654 * The operation must be completed by calling klp_free_patch_finish()
 655 * outside klp_mutex.
 656 */
 657static void klp_free_patch_start(struct klp_patch *patch)
 658{
 659	if (!list_empty(&patch->list))
 660		list_del(&patch->list);
 661
 662	klp_free_objects(patch);
 663}
 664
 665/*
 666 * This function implements the free part that must be called outside
 667 * klp_mutex.
 668 *
 669 * It must be called after klp_free_patch_start(). And it has to be
 670 * the last function accessing the livepatch structures when the patch
 671 * gets disabled.
 672 */
 673static void klp_free_patch_finish(struct klp_patch *patch)
 674{
 675	/*
 676	 * Avoid deadlock with enabled_store() sysfs callback by
 677	 * calling this outside klp_mutex. It is safe because
 678	 * this is called when the patch gets disabled and it
 679	 * cannot get enabled again.
 680	 */
 681	kobject_put(&patch->kobj);
 682	wait_for_completion(&patch->finish);
 683
 684	/* Put the module after the last access to struct klp_patch. */
 685	if (!patch->forced)
 686		module_put(patch->mod);
 687}
 688
 689/*
 690 * The livepatch might be freed from sysfs interface created by the patch.
 691 * This work allows waiting until the interface is destroyed in a separate
 692 * context.
 693 */
 694static void klp_free_patch_work_fn(struct work_struct *work)
 695{
 696	struct klp_patch *patch =
 697		container_of(work, struct klp_patch, free_work);
 698
 699	klp_free_patch_finish(patch);
 700}
 701
 702void klp_free_patch_async(struct klp_patch *patch)
 703{
 704	klp_free_patch_start(patch);
 705	schedule_work(&patch->free_work);
 706}
 707
 708void klp_free_replaced_patches_async(struct klp_patch *new_patch)
 709{
 710	struct klp_patch *old_patch, *tmp_patch;
 711
 712	klp_for_each_patch_safe(old_patch, tmp_patch) {
 713		if (old_patch == new_patch)
 714			return;
 715		klp_free_patch_async(old_patch);
 716	}
 717}
 718
 719static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 720{
 721	if (!func->old_name)
 722		return -EINVAL;
 723
 724	/*
 725	 * NOPs get the address later. The patched module must be loaded,
 726	 * see klp_init_object_loaded().
 727	 */
 728	if (!func->new_func && !func->nop)
 729		return -EINVAL;
 730
 731	if (strlen(func->old_name) >= KSYM_NAME_LEN)
 732		return -EINVAL;
 733
 734	INIT_LIST_HEAD(&func->stack_node);
 735	func->patched = false;
 736	func->transition = false;
 737
 738	/* The format for the sysfs directory is <function,sympos> where sympos
 739	 * is the nth occurrence of this symbol in kallsyms for the patched
 740	 * object. If the user selects 0 for old_sympos, then 1 will be used
 741	 * since a unique symbol will be the first occurrence.
 742	 */
 743	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
 744			   func->old_name,
 745			   func->old_sympos ? func->old_sympos : 1);
 746}
 747
 748static int klp_apply_object_relocs(struct klp_patch *patch,
 749				   struct klp_object *obj)
 750{
 751	int i, ret;
 752	struct klp_modinfo *info = patch->mod->klp_info;
 753
 754	for (i = 1; i < info->hdr.e_shnum; i++) {
 755		Elf_Shdr *sec = info->sechdrs + i;
 756
 757		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
 758			continue;
 759
 760		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
 761					       info->secstrings,
 762					       patch->mod->core_kallsyms.strtab,
 763					       info->symndx, i, obj->name);
 764		if (ret)
 765			return ret;
 766	}
 767
 768	return 0;
 769}
 770
 771/* parts of the initialization that are done only when the object is loaded */
 772static int klp_init_object_loaded(struct klp_patch *patch,
 773				  struct klp_object *obj)
 774{
 775	struct klp_func *func;
 776	int ret;
 777
 778	if (klp_is_module(obj)) {
 779		/*
 780		 * Only write module-specific relocations here
 781		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
 782		 * written earlier during the initialization of the klp module
 783		 * itself.
 784		 */
 785		ret = klp_apply_object_relocs(patch, obj);
 786		if (ret)
 787			return ret;
 788	}
 789
 790	klp_for_each_func(obj, func) {
 791		ret = klp_find_object_symbol(obj->name, func->old_name,
 792					     func->old_sympos,
 793					     (unsigned long *)&func->old_func);
 794		if (ret)
 795			return ret;
 796
 797		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
 798						  &func->old_size, NULL);
 799		if (!ret) {
 800			pr_err("kallsyms size lookup failed for '%s'\n",
 801			       func->old_name);
 802			return -ENOENT;
 803		}
 804
 805		if (func->nop)
 806			func->new_func = func->old_func;
 807
 808		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
 809						  &func->new_size, NULL);
 810		if (!ret) {
 811			pr_err("kallsyms size lookup failed for '%s' replacement\n",
 812			       func->old_name);
 813			return -ENOENT;
 814		}
 815	}
 816
 817	return 0;
 818}
 819
 820static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 821{
 822	struct klp_func *func;
 823	int ret;
 824	const char *name;
 825
 826	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
 827		return -EINVAL;
 828
 829	obj->patched = false;
 830	obj->mod = NULL;
 831
 832	klp_find_object_module(obj);
 833
 834	name = klp_is_module(obj) ? obj->name : "vmlinux";
 835	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
 836	if (ret)
 837		return ret;
 838
 839	klp_for_each_func(obj, func) {
 840		ret = klp_init_func(obj, func);
 841		if (ret)
 842			return ret;
 843	}
 844
 845	if (klp_is_object_loaded(obj))
 846		ret = klp_init_object_loaded(patch, obj);
 847
 848	return ret;
 849}
 850
 851static void klp_init_func_early(struct klp_object *obj,
 852				struct klp_func *func)
 853{
 854	kobject_init(&func->kobj, &klp_ktype_func);
 855	list_add_tail(&func->node, &obj->func_list);
 856}
 857
 858static void klp_init_object_early(struct klp_patch *patch,
 859				  struct klp_object *obj)
 860{
 861	INIT_LIST_HEAD(&obj->func_list);
 862	kobject_init(&obj->kobj, &klp_ktype_object);
 863	list_add_tail(&obj->node, &patch->obj_list);
 864}
 865
 866static int klp_init_patch_early(struct klp_patch *patch)
 867{
 868	struct klp_object *obj;
 869	struct klp_func *func;
 870
 871	if (!patch->objs)
 872		return -EINVAL;
 873
 874	INIT_LIST_HEAD(&patch->list);
 875	INIT_LIST_HEAD(&patch->obj_list);
 876	kobject_init(&patch->kobj, &klp_ktype_patch);
 877	patch->enabled = false;
 878	patch->forced = false;
 879	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
 880	init_completion(&patch->finish);
 881
 882	klp_for_each_object_static(patch, obj) {
 883		if (!obj->funcs)
 884			return -EINVAL;
 885
 886		klp_init_object_early(patch, obj);
 887
 888		klp_for_each_func_static(obj, func) {
 889			klp_init_func_early(obj, func);
 890		}
 891	}
 892
 893	if (!try_module_get(patch->mod))
 894		return -ENODEV;
 895
 896	return 0;
 897}
 898
 899static int klp_init_patch(struct klp_patch *patch)
 900{
 901	struct klp_object *obj;
 902	int ret;
 903
 904	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
 905	if (ret)
 906		return ret;
 907
 908	if (patch->replace) {
 909		ret = klp_add_nops(patch);
 910		if (ret)
 911			return ret;
 912	}
 913
 914	klp_for_each_object(patch, obj) {
 915		ret = klp_init_object(patch, obj);
 916		if (ret)
 917			return ret;
 918	}
 919
 920	list_add_tail(&patch->list, &klp_patches);
 921
 922	return 0;
 923}
 924
 925static int __klp_disable_patch(struct klp_patch *patch)
 926{
 927	struct klp_object *obj;
 928
 929	if (WARN_ON(!patch->enabled))
 930		return -EINVAL;
 931
 932	if (klp_transition_patch)
 933		return -EBUSY;
 934
 935	klp_init_transition(patch, KLP_UNPATCHED);
 936
 937	klp_for_each_object(patch, obj)
 938		if (obj->patched)
 939			klp_pre_unpatch_callback(obj);
 940
 941	/*
 942	 * Enforce the order of the func->transition writes in
 943	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
 944	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
 945	 * is called shortly after klp_update_patch_state() switches the task,
 946	 * this ensures the handler sees that func->transition is set.
 947	 */
 948	smp_wmb();
 949
 950	klp_start_transition();
 951	patch->enabled = false;
 952	klp_try_complete_transition();
 953
 954	return 0;
 955}
 956
 957static int __klp_enable_patch(struct klp_patch *patch)
 958{
 959	struct klp_object *obj;
 960	int ret;
 961
 962	if (klp_transition_patch)
 963		return -EBUSY;
 964
 965	if (WARN_ON(patch->enabled))
 966		return -EINVAL;
 967
 968	pr_notice("enabling patch '%s'\n", patch->mod->name);
 969
 970	klp_init_transition(patch, KLP_PATCHED);
 971
 972	/*
 973	 * Enforce the order of the func->transition writes in
 974	 * klp_init_transition() and the ops->func_stack writes in
 975	 * klp_patch_object(), so that klp_ftrace_handler() will see the
 976	 * func->transition updates before the handler is registered and the
 977	 * new funcs become visible to the handler.
 978	 */
 979	smp_wmb();
 980
 981	klp_for_each_object(patch, obj) {
 982		if (!klp_is_object_loaded(obj))
 983			continue;
 984
 985		ret = klp_pre_patch_callback(obj);
 986		if (ret) {
 987			pr_warn("pre-patch callback failed for object '%s'\n",
 988				klp_is_module(obj) ? obj->name : "vmlinux");
 989			goto err;
 990		}
 991
 992		ret = klp_patch_object(obj);
 993		if (ret) {
 994			pr_warn("failed to patch object '%s'\n",
 995				klp_is_module(obj) ? obj->name : "vmlinux");
 996			goto err;
 997		}
 998	}
 999
1000	klp_start_transition();
1001	patch->enabled = true;
1002	klp_try_complete_transition();
1003
1004	return 0;
1005err:
1006	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1007
1008	klp_cancel_transition();
1009	return ret;
1010}
1011
1012/**
1013 * klp_enable_patch() - enable the livepatch
1014 * @patch:	patch to be enabled
1015 *
1016 * Initializes the data structure associated with the patch, creates the sysfs
1017 * interface, performs the needed symbol lookups and code relocations,
1018 * registers the patched functions with ftrace.
1019 *
1020 * This function is supposed to be called from the livepatch module_init()
1021 * callback.
1022 *
1023 * Return: 0 on success, otherwise error
1024 */
1025int klp_enable_patch(struct klp_patch *patch)
1026{
1027	int ret;
1028
1029	if (!patch || !patch->mod)
1030		return -EINVAL;
1031
1032	if (!is_livepatch_module(patch->mod)) {
1033		pr_err("module %s is not marked as a livepatch module\n",
1034		       patch->mod->name);
1035		return -EINVAL;
1036	}
1037
1038	if (!klp_initialized())
1039		return -ENODEV;
1040
1041	if (!klp_have_reliable_stack()) {
1042		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
1043		pr_warn("The livepatch transition may never complete.\n");
1044	}
1045
1046	mutex_lock(&klp_mutex);
1047
1048	if (!klp_is_patch_compatible(patch)) {
1049		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
1050			patch->mod->name);
1051		mutex_unlock(&klp_mutex);
1052		return -EINVAL;
1053	}
1054
1055	ret = klp_init_patch_early(patch);
1056	if (ret) {
1057		mutex_unlock(&klp_mutex);
1058		return ret;
1059	}
1060
1061	ret = klp_init_patch(patch);
1062	if (ret)
1063		goto err;
1064
1065	ret = __klp_enable_patch(patch);
1066	if (ret)
1067		goto err;
1068		goto err;
1069	mutex_unlock(&klp_mutex);
1070
1071	return 0;
1072
1073err:
1074	klp_free_patch_start(patch);
1075
1076	mutex_unlock(&klp_mutex);
1077
1078	klp_free_patch_finish(patch);
1079
1080	return ret;
1081}
1082EXPORT_SYMBOL_GPL(klp_enable_patch);
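/*
 * [Editor's illustration -- not part of core.c]
 * With the v5.9 API, a patch module only calls klp_enable_patch() from its
 * module_init() callback (modeled on samples/livepatch/livepatch-sample.c;
 * the funcs/objs arrays have the same shape as in the v4.6 sketch earlier).
 * The MODULE_INFO(livepatch, "Y") tag is what is_livepatch_module() checks.
 */
#include <linux/module.h>
#include <linux/livepatch.h>

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,	/* defined as in the v4.6 example above */
};

static int livepatch_init(void)
{
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
	/* the core frees everything once the patch is disabled via sysfs */
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_INFO(livepatch, "Y");
MODULE_LICENSE("GPL");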
1083
1084/*
1085 * This function unpatches objects from the replaced livepatches.
1086 *
1087 * We could be pretty aggressive here. It is called in the situation where
1088 * these structures are no longer accessed from the ftrace handler.
1089 * All functions are redirected by the klp_transition_patch. They
1090 * use either a new code or they are in the original code because
1091 * of the special nop function patches.
1092 *
1093 * The only exception is when the transition was forced. In this case,
1094 * klp_ftrace_handler() might still see the replaced patch on the stack.
1095 * Fortunately, it is carefully designed to work with removed functions
1096 * thanks to RCU. We only have to keep the patches on the system. Also
1097 * this is handled transparently by patch->module_put.
1098 */
1099void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
1100{
1101	struct klp_patch *old_patch;
1102
1103	klp_for_each_patch(old_patch) {
1104		if (old_patch == new_patch)
1105			return;
1106
1107		old_patch->enabled = false;
1108		klp_unpatch_objects(old_patch);
1109	}
1110}
1111
1112/*
1113 * This function removes the dynamically allocated 'nop' functions.
1114 *
1115 * We could be pretty aggressive. NOPs do not change the existing
1116 * behavior except for adding an unnecessary delay in the ftrace handler.
1117 *
1118 * It is safe even when the transition was forced. The ftrace handler
1119 * will see a valid ops->func_stack entry thanks to RCU.
1120 *
1121 * We could even free the NOPs structures. They must be the last entry
1122 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
1123 * It does the same as klp_synchronize_transition() to make sure that
1124 * nobody is inside the ftrace handler once the operation finishes.
1125 *
1126 * IMPORTANT: It must be called right after removing the replaced patches!
1127 */
1128void klp_discard_nops(struct klp_patch *new_patch)
1129{
1130	klp_unpatch_objects_dynamic(klp_transition_patch);
1131	klp_free_objects_dynamic(klp_transition_patch);
1132}
1133
1134/*
1135 * Remove parts of patches that touch a given kernel module. The list of
1136 * patches processed might be limited. When limit is NULL, all patches
1137 * will be handled.
1138 */
1139static void klp_cleanup_module_patches_limited(struct module *mod,
1140					       struct klp_patch *limit)
1141{
1142	struct klp_patch *patch;
1143	struct klp_object *obj;
1144
1145	klp_for_each_patch(patch) {
1146		if (patch == limit)
1147			break;
1148
1149		klp_for_each_object(patch, obj) {
1150			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1151				continue;
1152
1153			if (patch != klp_transition_patch)
1154				klp_pre_unpatch_callback(obj);
1155
1156			pr_notice("reverting patch '%s' on unloading module '%s'\n",
1157				  patch->mod->name, obj->mod->name);
1158			klp_unpatch_object(obj);
1159
1160			klp_post_unpatch_callback(obj);
1161
1162			klp_free_object_loaded(obj);
1163			break;
1164		}
1165	}
1166}
1167
1168int klp_module_coming(struct module *mod)
1169{
1170	int ret;
1171	struct klp_patch *patch;
1172	struct klp_object *obj;
1173
1174	if (WARN_ON(mod->state != MODULE_STATE_COMING))
1175		return -EINVAL;
1176
1177	if (!strcmp(mod->name, "vmlinux")) {
1178		pr_err("vmlinux.ko: invalid module name");
1179		return -EINVAL;
1180	}
1181
1182	mutex_lock(&klp_mutex);
1183	/*
1184	 * Each module has to know that klp_module_coming()
1185	 * has been called. We never know what module will
1186	 * get patched by a new patch.
1187	 */
1188	mod->klp_alive = true;
1189
1190	klp_for_each_patch(patch) {
1191		klp_for_each_object(patch, obj) {
1192			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1193				continue;
1194
1195			obj->mod = mod;
1196
1197			ret = klp_init_object_loaded(patch, obj);
1198			if (ret) {
1199				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1200					patch->mod->name, obj->mod->name, ret);
1201				goto err;
1202			}
1203
1204			pr_notice("applying patch '%s' to loading module '%s'\n",
1205				  patch->mod->name, obj->mod->name);
1206
1207			ret = klp_pre_patch_callback(obj);
1208			if (ret) {
1209				pr_warn("pre-patch callback failed for object '%s'\n",
1210					obj->name);
1211				goto err;
1212			}
1213
1214			ret = klp_patch_object(obj);
1215			if (ret) {
1216				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1217					patch->mod->name, obj->mod->name, ret);
1218
1219				klp_post_unpatch_callback(obj);
1220				goto err;
1221			}
1222
1223			if (patch != klp_transition_patch)
1224				klp_post_patch_callback(obj);
1225
1226			break;
1227		}
1228	}
1229
1230	mutex_unlock(&klp_mutex);
1231
1232	return 0;
1233
1234err:
1235	/*
1236	 * If a patch is unsuccessfully applied, return
1237	 * error to the module loader.
1238	 */
1239	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1240		patch->mod->name, obj->mod->name, obj->mod->name);
1241	mod->klp_alive = false;
1242	obj->mod = NULL;
1243	klp_cleanup_module_patches_limited(mod, patch);
1244	mutex_unlock(&klp_mutex);
1245
1246	return ret;
1247}
1248
1249void klp_module_going(struct module *mod)
1250{
1251	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1252		    mod->state != MODULE_STATE_COMING))
1253		return;
1254
1255	mutex_lock(&klp_mutex);
1256	/*
1257	 * Each module has to know that klp_module_going()
1258	 * has been called. We never know what module will
1259	 * get patched by a new patch.
1260	 */
1261	mod->klp_alive = false;
1262
1263	klp_cleanup_module_patches_limited(mod, NULL);
1264
1265	mutex_unlock(&klp_mutex);
1266}
1267
1268static int __init klp_init(void)
1269{
1270	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1271	if (!klp_root_kobj)
1272		return -ENOMEM;
1273
1274	return 0;
1275}
1276
1277module_init(klp_init);