// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

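/*
 * Find the klp_ops struct for the given function being patched. Each
 * klp_ops tracks one patched code location; every klp_func on its
 * func_stack patches the same old_func, so it is enough to compare
 * against the first (most recently pushed) entry.
 */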
struct klp_ops *klp_find_ops(void *old_func)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_func == old_func)
                        return ops;
        }

        return NULL;
}

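/*
 * The ftrace handler through which every call to a patched function is
 * routed. It normally redirects the saved instruction pointer to the
 * new_func of the most recent klp_func on the stack; during a transition
 * it picks the variant matching the current task's patch state.
 *
 * For example, with patches P1 and P2 both patching foo(), the stack is
 *
 *     func_stack -> P2:foo -> P1:foo
 *
 * A fully transitioned task runs P2's foo, while a still-unpatched task
 * falls back to P1's foo, or to the original foo if the stack runs out.
 */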
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct pt_regs *regs)
{
        struct klp_ops *ops;
        struct klp_func *func;
        int patch_state;

        ops = container_of(fops, struct klp_ops, fops);

        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);

        /*
         * func should never be NULL because preemption should be disabled here
         * and unregister_ftrace_function() does the equivalent of a
         * synchronize_rcu() before the func_stack removal.
         */
        if (WARN_ON_ONCE(!func))
                goto unlock;

        /*
         * In the enable path, enforce the order of the ops->func_stack and
         * func->transition reads. The corresponding write barrier is in
         * __klp_enable_patch().
         *
         * (Note that this barrier technically isn't needed in the disable
         * path. In the rare case where klp_update_patch_state() runs before
         * this handler, its TIF_PATCH_PENDING read and this func->transition
         * read need to be ordered. But klp_update_patch_state() already
         * enforces that.)
         */
        smp_rmb();

        if (unlikely(func->transition)) {

                /*
                 * Enforce the order of the func->transition and
                 * current->patch_state reads. Otherwise we could read an
                 * out-of-date task state and pick the wrong function. The
                 * corresponding write barrier is in klp_init_transition().
                 */
                smp_rmb();

                patch_state = current->patch_state;

                WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

                if (patch_state == KLP_UNPATCHED) {
                        /*
                         * Use the previously patched version of the function.
                         * If no previous patches exist, continue with the
                         * original function.
                         */
                        func = list_entry_rcu(func->stack_node.next,
                                              struct klp_func, stack_node);

                        if (&func->stack_node == &ops->func_stack)
                                goto unlock;
                }
        }

        /*
         * NOPs are used to replace existing patches with original code.
         * Do nothing! Setting pc would cause an infinite loop.
         */
        if (func->nop)
                goto unlock;

        klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
        preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some
 * architectures it's more complicated, so they can provide a custom
 * implementation. On powerpc, for example, the ftrace call site is offset
 * from the function's entry point, so the architecture overrides this helper.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
        return faddr;
}
#endif

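/*
 * Remove @func from its klp_ops func_stack. If it was the only entry,
 * also unregister the ftrace handler, drop the ftrace filter, and free
 * the now-unused klp_ops.
 */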
static void klp_unpatch_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(!func->patched))
                return;
        if (WARN_ON(!func->old_func))
                return;

        ops = klp_find_ops(func->old_func);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (WARN_ON(!ftrace_loc))
                        return;

                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->patched = false;
}

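/*
 * Redirect func->old_func to func->new_func. The first patch of a given
 * code location allocates a klp_ops and registers the ftrace handler;
 * later patches of the same location just push onto the existing
 * func_stack.
 */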
static int klp_patch_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_func))
                return -EINVAL;

        if (WARN_ON(func->patched))
                return -EINVAL;

        ops = klp_find_ops(func->old_func);
        if (!ops) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (!ftrace_loc) {
                        pr_err("failed to find location for function '%s'\n",
                               func->old_name);
                        return -EINVAL;
                }

                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
                                  FTRACE_OPS_FL_DYNAMIC |
                                  FTRACE_OPS_FL_IPMODIFY |
                                  FTRACE_OPS_FL_PERMANENT;

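                /*
                 * Add the func to the stack before registering the ftrace
                 * handler, so the handler is guaranteed to find a func as
                 * soon as calls to the patched function can be intercepted.
                 */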
                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
                        goto err;
                }
        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->patched = true;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}

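/*
 * Unpatch the functions of @obj. With @nops_only, only the dynamically
 * allocated nop functions are removed (they become redundant once an
 * atomic-replace patch settles); a statically defined object then stays
 * marked as patched.
 */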
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if (nops_only && !func->nop)
                        continue;

                if (func->patched)
                        klp_unpatch_func(func);
        }

        if (obj->dynamic || !nops_only)
                obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
        __klp_unpatch_object(obj, false);
}

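/*
 * Patch every function of @obj. On the first failure, unpatch the
 * functions patched so far and return the error.
 */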
int klp_patch_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->patched))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_patch_func(func);
                if (ret) {
                        klp_unpatch_object(obj);
                        return ret;
                }
        }
        obj->patched = true;

        return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        __klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, false);
}

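/*
 * Unpatch only the dynamically allocated nop functions of @patch,
 * leaving all regular patched functions in place.
 */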
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, true);
}