v5.4: kernel/livepatch/patch.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}


	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
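
Both versions keep their state on the klp_ops list declared at the top of the file: one klp_ops per patched function location, embedding the ftrace_ops that klp_ftrace_handler() recovers with container_of() and the func_stack of stacked klp_func replacements it dispatches to. As a reference point, a sketch of that container, assuming the layout declared in kernel/livepatch/patch.h (field comments added here for illustration):

struct klp_ops {
	struct list_head node;		/* entry in the global klp_ops list */
	struct list_head func_stack;	/* stack of klp_func replacements, most recent first */
	struct ftrace_ops fops;		/* ftrace hook dispatching to klp_ftrace_handler() */
};
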
v6.9.4: kernel/livepatch/patch.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}


	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
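
klp_patch_object() and klp_unpatch_object() operate on the klp_func/klp_object/klp_patch structures that a livepatch module hands to klp_enable_patch(). A minimal caller-side sketch, loosely modeled on samples/livepatch/livepatch-sample.c (the patched symbol and replacement body are purely illustrative):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Illustrative replacement for the vmlinux symbol named in funcs[] below. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	/* Enabling the patch eventually reaches klp_patch_object() above. */
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");

Once such a module is loaded and the transition completes, calls to the old function are redirected by klp_ftrace_handler() to the replacement at the top of the corresponding func_stack.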