v6.2: kernel/livepatch/transition.c
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * transition.c - Kernel Live Patching transition functions
  4 *
  5 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/cpu.h>
 11#include <linux/stacktrace.h>
 12#include "core.h"
 13#include "patch.h"
 14#include "transition.h"
 15
 16#define MAX_STACK_ENTRIES  100
 17#define STACK_ERR_BUF_SIZE 128
 18
 19#define SIGNALS_TIMEOUT 15
 20
 21struct klp_patch *klp_transition_patch;
 22
 23static int klp_target_state = KLP_UNDEFINED;
 24
 25static unsigned int klp_signals_cnt;
 26
 27/*
 28 * This work can be performed periodically to finish patching or unpatching any
 29 * "straggler" tasks which failed to transition in the first attempt.
 30 */
 31static void klp_transition_work_fn(struct work_struct *work)
 32{
 33	mutex_lock(&klp_mutex);
 34
 35	if (klp_transition_patch)
 36		klp_try_complete_transition();
 37
 38	mutex_unlock(&klp_mutex);
 39}
 40static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 41
 42/*
 43 * This function is just a stub to implement a hard force
 44 * of synchronize_rcu(). This requires synchronizing
 45 * tasks even in userspace and idle.
 46 */
 47static void klp_sync(struct work_struct *work)
 48{
 49}
 50
 51/*
 52 * We also allow patching functions where RCU is not watching,
 53 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 54 * to do the synchronization. Instead, hard-force the sched synchronization.
 55 *
 56 * This approach allows RCU functions to be used for manipulating
 57 * func_stack safely.
 58 */
 59static void klp_synchronize_transition(void)
 60{
 61	schedule_on_each_cpu(klp_sync);
 62}
 63
 64/*
 65 * The transition to the target patch state is complete.  Clean up the data
 66 * structures.
 67 */
 68static void klp_complete_transition(void)
 69{
 70	struct klp_object *obj;
 71	struct klp_func *func;
 72	struct task_struct *g, *task;
 73	unsigned int cpu;
 74
 75	pr_debug("'%s': completing %s transition\n",
 76		 klp_transition_patch->mod->name,
 77		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 78
 79	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
 80		klp_unpatch_replaced_patches(klp_transition_patch);
 81		klp_discard_nops(klp_transition_patch);
 82	}
 83
 84	if (klp_target_state == KLP_UNPATCHED) {
 85		/*
 86		 * All tasks have transitioned to KLP_UNPATCHED so we can now
 87		 * remove the new functions from the func_stack.
 88		 */
 89		klp_unpatch_objects(klp_transition_patch);
 90
 91		/*
 92		 * Make sure klp_ftrace_handler() can no longer see functions
 93		 * from this patch on the ops->func_stack.  Otherwise, after
 94		 * func->transition gets cleared, the handler may choose a
 95		 * removed function.
 96		 */
 97		klp_synchronize_transition();
 98	}
 99
100	klp_for_each_object(klp_transition_patch, obj)
101		klp_for_each_func(obj, func)
102			func->transition = false;
103
104	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
105	if (klp_target_state == KLP_PATCHED)
106		klp_synchronize_transition();
107
108	read_lock(&tasklist_lock);
109	for_each_process_thread(g, task) {
110		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
111		task->patch_state = KLP_UNDEFINED;
112	}
113	read_unlock(&tasklist_lock);
114
115	for_each_possible_cpu(cpu) {
116		task = idle_task(cpu);
117		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
118		task->patch_state = KLP_UNDEFINED;
119	}
120
121	klp_for_each_object(klp_transition_patch, obj) {
122		if (!klp_is_object_loaded(obj))
123			continue;
124		if (klp_target_state == KLP_PATCHED)
125			klp_post_patch_callback(obj);
126		else if (klp_target_state == KLP_UNPATCHED)
127			klp_post_unpatch_callback(obj);
128	}
129
130	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
131		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
132
133	klp_target_state = KLP_UNDEFINED;
134	klp_transition_patch = NULL;
135}
136
137/*
138 * This is called in the error path, to cancel a transition before it has
139 * started, i.e. klp_init_transition() has been called but
140 * klp_start_transition() hasn't.  If the transition *has* been started,
141 * klp_reverse_transition() should be used instead.
142 */
143void klp_cancel_transition(void)
144{
145	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
146		return;
147
148	pr_debug("'%s': canceling patching transition, going to unpatch\n",
149		 klp_transition_patch->mod->name);
150
151	klp_target_state = KLP_UNPATCHED;
152	klp_complete_transition();
153}
154
155/*
156 * Switch the patched state of the task to the set of functions in the target
157 * patch state.
158 *
159 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
160 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
161 */
162void klp_update_patch_state(struct task_struct *task)
163{
164	/*
165	 * A variant of synchronize_rcu() is used to allow patching functions
166	 * where RCU is not watching, see klp_synchronize_transition().
167	 */
168	preempt_disable_notrace();
169
170	/*
171	 * This test_and_clear_tsk_thread_flag() call also serves as a read
172	 * barrier (smp_rmb) for two cases:
173	 *
174	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
175	 *    klp_target_state read.  The corresponding write barrier is in
176	 *    klp_init_transition().
177	 *
178	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
179	 *    of func->transition, if klp_ftrace_handler() is called later on
180	 *    the same CPU.  See __klp_disable_patch().
181	 */
182	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
183		task->patch_state = READ_ONCE(klp_target_state);
184
185	preempt_enable_notrace();
186}
187
188/*
189 * Determine whether the given stack trace includes any references to a
190 * to-be-patched or to-be-unpatched function.
191 */
192static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
193				unsigned int nr_entries)
194{
195	unsigned long func_addr, func_size, address;
196	struct klp_ops *ops;
197	int i;
198
199	if (klp_target_state == KLP_UNPATCHED) {
200		 /*
201		  * Check for the to-be-unpatched function
202		  * (the func itself).
203		  */
204		func_addr = (unsigned long)func->new_func;
205		func_size = func->new_size;
206	} else {
207		/*
208		 * Check for the to-be-patched function
209		 * (the previous func).
210		 */
211		ops = klp_find_ops(func->old_func);
212
213		if (list_is_singular(&ops->func_stack)) {
214			/* original function */
215			func_addr = (unsigned long)func->old_func;
216			func_size = func->old_size;
217		} else {
218			/* previously patched function */
219			struct klp_func *prev;
220
221			prev = list_next_entry(func, stack_node);
222			func_addr = (unsigned long)prev->new_func;
223			func_size = prev->new_size;
224		}
225	}
226
227	for (i = 0; i < nr_entries; i++) {
228		address = entries[i];
229
230		if (address >= func_addr && address < func_addr + func_size)
231			return -EAGAIN;
232	}
233
234	return 0;
235}
236
237/*
238 * Determine whether it's safe to transition the task to the target patch state
239 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
240 */
241static int klp_check_stack(struct task_struct *task, const char **oldname)
242{
243	static unsigned long entries[MAX_STACK_ENTRIES];
244	struct klp_object *obj;
245	struct klp_func *func;
246	int ret, nr_entries;
247
248	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
249	if (ret < 0)
250		return -EINVAL;
251	nr_entries = ret;
252
253	klp_for_each_object(klp_transition_patch, obj) {
254		if (!obj->patched)
255			continue;
256		klp_for_each_func(obj, func) {
257			ret = klp_check_stack_func(func, entries, nr_entries);
258			if (ret) {
259				*oldname = func->old_name;
260				return -EADDRINUSE;
261			}
262		}
263	}
264
265	return 0;
266}
267
268static int klp_check_and_switch_task(struct task_struct *task, void *arg)
269{
270	int ret;
271
272	if (task_curr(task) && task != current)
273		return -EBUSY;
274
275	ret = klp_check_stack(task, arg);
276	if (ret)
277		return ret;
278
279	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
280	task->patch_state = klp_target_state;
281	return 0;
282}
283
284/*
285 * Try to safely switch a task to the target patch state.  If it's currently
286 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
287 * if the stack is unreliable, return false.
288 */
289static bool klp_try_switch_task(struct task_struct *task)
290{
291	const char *old_name;
292	int ret;
293
294	/* check if this task has already switched over */
295	if (task->patch_state == klp_target_state)
296		return true;
297
298	/*
299	 * For arches which don't have reliable stack traces, we have to rely
300	 * on other methods (e.g., switching tasks at kernel exit).
301	 */
302	if (!klp_have_reliable_stack())
303		return false;
304
305	/*
306	 * Now try to check the stack for any to-be-patched or to-be-unpatched
307	 * functions.  If all goes well, switch the task to the target patch
308	 * state.
309	 */
310	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
311	switch (ret) {
312	case 0:		/* success */
313		break;
314
315	case -EBUSY:	/* klp_check_and_switch_task() */
316		pr_debug("%s: %s:%d is running\n",
317			 __func__, task->comm, task->pid);
318		break;
319	case -EINVAL:	/* klp_check_and_switch_task() */
320		pr_debug("%s: %s:%d has an unreliable stack\n",
321			 __func__, task->comm, task->pid);
322		break;
323	case -EADDRINUSE: /* klp_check_and_switch_task() */
324		pr_debug("%s: %s:%d is sleeping on function %s\n",
325			 __func__, task->comm, task->pid, old_name);
326		break;
327
328	default:
329		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
330			 __func__, ret, task->comm, task->pid);
331		break;
332	}
333
334	return !ret;
335}
336
337/*
338 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
339 * Kthreads with TIF_PATCH_PENDING set are woken up.
340 */
341static void klp_send_signals(void)
342{
343	struct task_struct *g, *task;
344
345	if (klp_signals_cnt == SIGNALS_TIMEOUT)
346		pr_notice("signaling remaining tasks\n");
347
348	read_lock(&tasklist_lock);
349	for_each_process_thread(g, task) {
350		if (!klp_patch_pending(task))
351			continue;
352
353		/*
354		 * There is a small race here. We could see TIF_PATCH_PENDING
355		 * set and decide to wake up a kthread or send a fake signal.
356		 * Meanwhile the task could migrate itself and the action
357		 * would be meaningless. It is not serious though.
358		 */
359		if (task->flags & PF_KTHREAD) {
360			/*
361			 * Wake up a kthread which sleeps interruptibly and
362			 * still has not been migrated.
363			 */
364			wake_up_state(task, TASK_INTERRUPTIBLE);
365		} else {
366			/*
367			 * Send fake signal to all non-kthread tasks which are
368			 * still not migrated.
369			 */
370			set_notify_signal(task);
371		}
372	}
373	read_unlock(&tasklist_lock);
374}
375
376/*
377 * Try to switch all remaining tasks to the target patch state by walking the
378 * stacks of sleeping tasks and looking for any to-be-patched or
379 * to-be-unpatched functions.  If such functions are found, the task can't be
380 * switched yet.
381 *
382 * If any tasks are still stuck in the initial patch state, schedule a retry.
383 */
384void klp_try_complete_transition(void)
385{
386	unsigned int cpu;
387	struct task_struct *g, *task;
388	struct klp_patch *patch;
389	bool complete = true;
390
391	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
392
393	/*
394	 * Try to switch the tasks to the target patch state by walking their
395	 * stacks and looking for any to-be-patched or to-be-unpatched
396	 * functions.  If such functions are found on a stack, or if the stack
397	 * is deemed unreliable, the task can't be switched yet.
398	 *
399	 * Usually this will transition most (or all) of the tasks on a system
400	 * unless the patch includes changes to a very common function.
401	 */
402	read_lock(&tasklist_lock);
403	for_each_process_thread(g, task)
404		if (!klp_try_switch_task(task))
405			complete = false;
406	read_unlock(&tasklist_lock);
407
408	/*
409	 * Ditto for the idle "swapper" tasks.
410	 */
411	cpus_read_lock();
412	for_each_possible_cpu(cpu) {
413		task = idle_task(cpu);
414		if (cpu_online(cpu)) {
415			if (!klp_try_switch_task(task)) {
416				complete = false;
417				/* Make idle task go through the main loop. */
418				wake_up_if_idle(cpu);
419			}
420		} else if (task->patch_state != klp_target_state) {
421			/* offline idle tasks can be switched immediately */
422			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
423			task->patch_state = klp_target_state;
424		}
425	}
426	cpus_read_unlock();
427
428	if (!complete) {
429		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
430			klp_send_signals();
431		klp_signals_cnt++;
432
433		/*
434		 * Some tasks weren't able to be switched over.  Try again
435		 * later and/or wait for other methods like kernel exit
436		 * switching.
437		 */
438		schedule_delayed_work(&klp_transition_work,
439				      round_jiffies_relative(HZ));
440		return;
441	}
442
443	/* we're done, now cleanup the data structures */
444	patch = klp_transition_patch;
445	klp_complete_transition();
446
447	/*
448	 * It would make more sense to free the unused patches in
449	 * klp_complete_transition(), but it is also called
450	 * from klp_cancel_transition().
451	 */
452	if (!patch->enabled)
453		klp_free_patch_async(patch);
454	else if (patch->replace)
455		klp_free_replaced_patches_async(patch);
456}
457
458/*
459 * Start the transition to the specified target patch state so tasks can begin
460 * switching to it.
461 */
462void klp_start_transition(void)
463{
464	struct task_struct *g, *task;
465	unsigned int cpu;
466
467	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
468
469	pr_notice("'%s': starting %s transition\n",
470		  klp_transition_patch->mod->name,
471		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
472
473	/*
474	 * Mark all normal tasks as needing a patch state update.  They'll
475	 * switch either in klp_try_complete_transition() or as they exit the
476	 * kernel.
477	 */
478	read_lock(&tasklist_lock);
479	for_each_process_thread(g, task)
480		if (task->patch_state != klp_target_state)
481			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
482	read_unlock(&tasklist_lock);
483
484	/*
485	 * Mark all idle tasks as needing a patch state update.  They'll switch
486	 * either in klp_try_complete_transition() or at the idle loop switch
487	 * point.
488	 */
489	for_each_possible_cpu(cpu) {
490		task = idle_task(cpu);
491		if (task->patch_state != klp_target_state)
492			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
493	}
494
495	klp_signals_cnt = 0;
496}
497
498/*
499 * Initialize the global target patch state and all tasks to the initial patch
500 * state, and initialize all function transition states to true in preparation
501 * for patching or unpatching.
502 */
503void klp_init_transition(struct klp_patch *patch, int state)
504{
505	struct task_struct *g, *task;
506	unsigned int cpu;
507	struct klp_object *obj;
508	struct klp_func *func;
509	int initial_state = !state;
510
511	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
512
513	klp_transition_patch = patch;
514
515	/*
516	 * Set the global target patch state which tasks will switch to.  This
517	 * has no effect until the TIF_PATCH_PENDING flags get set later.
518	 */
519	klp_target_state = state;
520
521	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
522		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
523
524	/*
525	 * Initialize all tasks to the initial patch state to prepare them for
526	 * switching to the target state.
527	 */
528	read_lock(&tasklist_lock);
529	for_each_process_thread(g, task) {
530		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
531		task->patch_state = initial_state;
532	}
533	read_unlock(&tasklist_lock);
534
535	/*
536	 * Ditto for the idle "swapper" tasks.
537	 */
538	for_each_possible_cpu(cpu) {
539		task = idle_task(cpu);
540		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
541		task->patch_state = initial_state;
542	}
543
544	/*
545	 * Enforce the order of the task->patch_state initializations and the
546	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
547	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
548	 *
549	 * Also enforce the order of the klp_target_state write and future
550	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
551	 * set a task->patch_state to KLP_UNDEFINED.
552	 */
553	smp_wmb();
554
555	/*
556	 * Set the func transition states so klp_ftrace_handler() will know to
557	 * switch to the transition logic.
558	 *
559	 * When patching, the funcs aren't yet in the func_stack and will be
560	 * made visible to the ftrace handler shortly by the calls to
561	 * klp_patch_object().
562	 *
563	 * When unpatching, the funcs are already in the func_stack and so are
564	 * already visible to the ftrace handler.
565	 */
566	klp_for_each_object(patch, obj)
567		klp_for_each_func(obj, func)
568			func->transition = true;
569}
570
571/*
572 * This function can be called in the middle of an existing transition to
573 * reverse the direction of the target patch state.  This can be done to
574 * effectively cancel an existing enable or disable operation if there are any
575 * tasks which are stuck in the initial patch state.
576 */
577void klp_reverse_transition(void)
578{
579	unsigned int cpu;
580	struct task_struct *g, *task;
581
582	pr_debug("'%s': reversing transition from %s\n",
583		 klp_transition_patch->mod->name,
584		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
585						   "unpatching to patching");
586
587	klp_transition_patch->enabled = !klp_transition_patch->enabled;
588
589	klp_target_state = !klp_target_state;
590
591	/*
592	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
593	 * klp_update_patch_state() running in parallel with
594	 * klp_start_transition().
595	 */
596	read_lock(&tasklist_lock);
597	for_each_process_thread(g, task)
598		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
599	read_unlock(&tasklist_lock);
600
601	for_each_possible_cpu(cpu)
602		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
603
604	/* Let any remaining calls to klp_update_patch_state() complete */
605	klp_synchronize_transition();
606
607	klp_start_transition();
608}
609
610/* Called from copy_process() during fork */
611void klp_copy_process(struct task_struct *child)
612{
613
614	/*
615	 * The parent process may have gone through a KLP transition since
616	 * the thread flag was copied in setup_thread_stack earlier. Bring
617	 * the task flag up to date with the parent here.
618	 *
619	 * The operation is serialized against all klp_*_transition()
620	 * operations by the tasklist_lock. The only exception is
621	 * klp_update_patch_state(current), but we cannot race with
622	 * that because we are current.
623	 */
624	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
625		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
626	else
627		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
628
629	child->patch_state = current->patch_state;
630}
631
632/*
633 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
634 * existing transition to finish.
635 *
636 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
637 * 'current'. This is not the case here and the consistency model could be
638 * broken. The administrator, who is the only one who can execute
639 * klp_force_transition(), has to be aware of this.
640 */
641void klp_force_transition(void)
642{
643	struct klp_patch *patch;
644	struct task_struct *g, *task;
645	unsigned int cpu;
646
647	pr_warn("forcing remaining tasks to the patched state\n");
648
649	read_lock(&tasklist_lock);
650	for_each_process_thread(g, task)
651		klp_update_patch_state(task);
652	read_unlock(&tasklist_lock);
653
654	for_each_possible_cpu(cpu)
655		klp_update_patch_state(idle_task(cpu));
656
657	/* Set forced flag for patches being removed. */
658	if (klp_target_state == KLP_UNPATCHED)
659		klp_transition_patch->forced = true;
660	else if (klp_transition_patch->replace) {
661		klp_for_each_patch(patch) {
662			if (patch != klp_transition_patch)
663				patch->forced = true;
664		}
665	}
666}
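
The transition machinery above is driven entirely by the livepatch core: a patch module only declares which functions to replace and calls klp_enable_patch(), which walks through klp_init_transition(), klp_start_transition() and klp_try_complete_transition() shown above. The sketch below is modeled on the upstream samples/livepatch/livepatch-sample.c; treat it as an illustration only. The patched function (cmdline_proc_show) and the module boilerplate are assumptions taken from that sample, not part of transition.c.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Replacement body; must match the signature of the function being patched. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* A NULL object name means the functions live in vmlinux. */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	/* Kicks off the patching transition handled by transition.c above. */
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");

Once such a module is loaded, the per-task migration can be watched through the sysfs attributes described in Documentation/livepatch: reading /sys/kernel/livepatch/<patch>/transition shows whether a transition is still pending, flipping the enabled attribute during a pending transition ends up in klp_reverse_transition(), and writing 1 to force drops the consistency-model guarantees via klp_force_transition().
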
v4.17: kernel/livepatch/transition.c
 
  1/*
  2 * transition.c - Kernel Live Patching transition functions
  3 *
  4 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public License
  8 * as published by the Free Software Foundation; either version 2
  9 * of the License, or (at your option) any later version.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 21
 22#include <linux/cpu.h>
 23#include <linux/stacktrace.h>
 24#include "core.h"
 25#include "patch.h"
 26#include "transition.h"
 27#include "../sched/sched.h"
 28
 29#define MAX_STACK_ENTRIES  100
 30#define STACK_ERR_BUF_SIZE 128
 31
 32struct klp_patch *klp_transition_patch;
 33
 34static int klp_target_state = KLP_UNDEFINED;
 35
 36static bool klp_forced = false;
 37
 38/*
 39 * This work can be performed periodically to finish patching or unpatching any
 40 * "straggler" tasks which failed to transition in the first attempt.
 41 */
 42static void klp_transition_work_fn(struct work_struct *work)
 43{
 44	mutex_lock(&klp_mutex);
 45
 46	if (klp_transition_patch)
 47		klp_try_complete_transition();
 48
 49	mutex_unlock(&klp_mutex);
 50}
 51static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 52
 53/*
 54 * This function is just a stub to implement a hard force
 55 * of synchronize_sched(). This requires synchronizing
 56 * tasks even in userspace and idle.
 57 */
 58static void klp_sync(struct work_struct *work)
 59{
 60}
 61
 62/*
 63 * We also allow patching functions where RCU is not watching,
 64 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 65 * to do the synchronization. Instead, hard-force the sched synchronization.
 66 *
 67 * This approach allows RCU functions to be used for manipulating
 68 * func_stack safely.
 69 */
 70static void klp_synchronize_transition(void)
 71{
 72	schedule_on_each_cpu(klp_sync);
 73}
 74
 75/*
 76 * The transition to the target patch state is complete.  Clean up the data
 77 * structures.
 78 */
 79static void klp_complete_transition(void)
 80{
 81	struct klp_object *obj;
 82	struct klp_func *func;
 83	struct task_struct *g, *task;
 84	unsigned int cpu;
 85
 86	pr_debug("'%s': completing %s transition\n",
 87		 klp_transition_patch->mod->name,
 88		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 89
 90	if (klp_target_state == KLP_UNPATCHED) {
 91		/*
 92		 * All tasks have transitioned to KLP_UNPATCHED so we can now
 93		 * remove the new functions from the func_stack.
 94		 */
 95		klp_unpatch_objects(klp_transition_patch);
 96
 97		/*
 98		 * Make sure klp_ftrace_handler() can no longer see functions
 99		 * from this patch on the ops->func_stack.  Otherwise, after
100		 * func->transition gets cleared, the handler may choose a
101		 * removed function.
102		 */
103		klp_synchronize_transition();
104	}
105
106	klp_for_each_object(klp_transition_patch, obj)
107		klp_for_each_func(obj, func)
108			func->transition = false;
109
110	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
111	if (klp_target_state == KLP_PATCHED)
112		klp_synchronize_transition();
113
114	read_lock(&tasklist_lock);
115	for_each_process_thread(g, task) {
116		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
117		task->patch_state = KLP_UNDEFINED;
118	}
119	read_unlock(&tasklist_lock);
120
121	for_each_possible_cpu(cpu) {
122		task = idle_task(cpu);
123		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
124		task->patch_state = KLP_UNDEFINED;
125	}
126
127	klp_for_each_object(klp_transition_patch, obj) {
128		if (!klp_is_object_loaded(obj))
129			continue;
130		if (klp_target_state == KLP_PATCHED)
131			klp_post_patch_callback(obj);
132		else if (klp_target_state == KLP_UNPATCHED)
133			klp_post_unpatch_callback(obj);
134	}
135
136	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
137		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
138
139	/*
140	 * A set klp_forced flag implies an unbounded increase of the module's
141	 * ref count if the module is disabled/enabled in a loop.
142	 */
143	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
144		module_put(klp_transition_patch->mod);
145
146	klp_target_state = KLP_UNDEFINED;
147	klp_transition_patch = NULL;
148}
149
150/*
151 * This is called in the error path, to cancel a transition before it has
152 * started, i.e. klp_init_transition() has been called but
153 * klp_start_transition() hasn't.  If the transition *has* been started,
154 * klp_reverse_transition() should be used instead.
155 */
156void klp_cancel_transition(void)
157{
158	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
159		return;
160
161	pr_debug("'%s': canceling patching transition, going to unpatch\n",
162		 klp_transition_patch->mod->name);
163
164	klp_target_state = KLP_UNPATCHED;
165	klp_complete_transition();
166}
167
168/*
169 * Switch the patched state of the task to the set of functions in the target
170 * patch state.
171 *
172 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
173 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
174 */
175void klp_update_patch_state(struct task_struct *task)
176{
177	/*
178	 * A variant of synchronize_sched() is used to allow patching functions
179	 * where RCU is not watching, see klp_synchronize_transition().
180	 */
181	preempt_disable_notrace();
182
183	/*
184	 * This test_and_clear_tsk_thread_flag() call also serves as a read
185	 * barrier (smp_rmb) for two cases:
186	 *
187	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
188	 *    klp_target_state read.  The corresponding write barrier is in
189	 *    klp_init_transition().
190	 *
191	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
192	 *    of func->transition, if klp_ftrace_handler() is called later on
193	 *    the same CPU.  See __klp_disable_patch().
194	 */
195	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
196		task->patch_state = READ_ONCE(klp_target_state);
197
198	preempt_enable_notrace();
199}
200
201/*
202 * Determine whether the given stack trace includes any references to a
203 * to-be-patched or to-be-unpatched function.
204 */
205static int klp_check_stack_func(struct klp_func *func,
206				struct stack_trace *trace)
207{
208	unsigned long func_addr, func_size, address;
209	struct klp_ops *ops;
210	int i;
211
212	for (i = 0; i < trace->nr_entries; i++) {
213		address = trace->entries[i];
214
215		if (klp_target_state == KLP_UNPATCHED) {
216			 /*
217			  * Check for the to-be-unpatched function
218			  * (the func itself).
219			  */
220			func_addr = (unsigned long)func->new_func;
221			func_size = func->new_size;
222		} else {
223			/*
224			 * Check for the to-be-patched function
225			 * (the previous func).
226			 */
227			ops = klp_find_ops(func->old_addr);
228
229			if (list_is_singular(&ops->func_stack)) {
230				/* original function */
231				func_addr = func->old_addr;
232				func_size = func->old_size;
233			} else {
234				/* previously patched function */
235				struct klp_func *prev;
236
237				prev = list_next_entry(func, stack_node);
238				func_addr = (unsigned long)prev->new_func;
239				func_size = prev->new_size;
240			}
241		}
242
243		if (address >= func_addr && address < func_addr + func_size)
244			return -EAGAIN;
245	}
246
247	return 0;
248}
249
250/*
251 * Determine whether it's safe to transition the task to the target patch state
252 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
253 */
254static int klp_check_stack(struct task_struct *task, char *err_buf)
255{
256	static unsigned long entries[MAX_STACK_ENTRIES];
257	struct stack_trace trace;
258	struct klp_object *obj;
259	struct klp_func *func;
260	int ret;
261
262	trace.skip = 0;
263	trace.nr_entries = 0;
264	trace.max_entries = MAX_STACK_ENTRIES;
265	trace.entries = entries;
266	ret = save_stack_trace_tsk_reliable(task, &trace);
267	WARN_ON_ONCE(ret == -ENOSYS);
268	if (ret) {
269		snprintf(err_buf, STACK_ERR_BUF_SIZE,
270			 "%s: %s:%d has an unreliable stack\n",
271			 __func__, task->comm, task->pid);
272		return ret;
273	}
274
275	klp_for_each_object(klp_transition_patch, obj) {
276		if (!obj->patched)
277			continue;
278		klp_for_each_func(obj, func) {
279			ret = klp_check_stack_func(func, &trace);
280			if (ret) {
281				snprintf(err_buf, STACK_ERR_BUF_SIZE,
282					 "%s: %s:%d is sleeping on function %s\n",
283					 __func__, task->comm, task->pid,
284					 func->old_name);
285				return ret;
286			}
287		}
288	}
289
290	return 0;
291}
292
293/*
294 * Try to safely switch a task to the target patch state.  If it's currently
295 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
296 * if the stack is unreliable, return false.
297 */
298static bool klp_try_switch_task(struct task_struct *task)
299{
300	struct rq *rq;
301	struct rq_flags flags;
302	int ret;
303	bool success = false;
304	char err_buf[STACK_ERR_BUF_SIZE];
305
306	err_buf[0] = '\0';
307
308	/* check if this task has already switched over */
309	if (task->patch_state == klp_target_state)
310		return true;
311
312	/*
313	 * For arches which don't have reliable stack traces, we have to rely
314	 * on other methods (e.g., switching tasks at kernel exit).
315	 */
316	if (!klp_have_reliable_stack())
317		return false;
318
319	/*
320	 * Now try to check the stack for any to-be-patched or to-be-unpatched
321	 * functions.  If all goes well, switch the task to the target patch
322	 * state.
323	 */
324	rq = task_rq_lock(task, &flags);
325
326	if (task_running(rq, task) && task != current) {
327		snprintf(err_buf, STACK_ERR_BUF_SIZE,
328			 "%s: %s:%d is running\n", __func__, task->comm,
329			 task->pid);
330		goto done;
331	}
332
333	ret = klp_check_stack(task, err_buf);
334	if (ret)
335		goto done;
336
337	success = true;
338
339	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
340	task->patch_state = klp_target_state;
341
342done:
343	task_rq_unlock(rq, task, &flags);
344
345	/*
346	 * Due to console deadlock issues, pr_debug() can't be used while
347	 * holding the task rq lock.  Instead we have to use a temporary buffer
348	 * and print the debug message after releasing the lock.
349	 */
350	if (err_buf[0] != '\0')
351		pr_debug("%s", err_buf);
352
353	return success;
354
355}
356
357/*
358 * Try to switch all remaining tasks to the target patch state by walking the
359 * stacks of sleeping tasks and looking for any to-be-patched or
360 * to-be-unpatched functions.  If such functions are found, the task can't be
361 * switched yet.
362 *
363 * If any tasks are still stuck in the initial patch state, schedule a retry.
364 */
365void klp_try_complete_transition(void)
366{
367	unsigned int cpu;
368	struct task_struct *g, *task;
369	bool complete = true;
370
371	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
372
373	/*
374	 * Try to switch the tasks to the target patch state by walking their
375	 * stacks and looking for any to-be-patched or to-be-unpatched
376	 * functions.  If such functions are found on a stack, or if the stack
377	 * is deemed unreliable, the task can't be switched yet.
378	 *
379	 * Usually this will transition most (or all) of the tasks on a system
380	 * unless the patch includes changes to a very common function.
381	 */
382	read_lock(&tasklist_lock);
383	for_each_process_thread(g, task)
384		if (!klp_try_switch_task(task))
385			complete = false;
386	read_unlock(&tasklist_lock);
387
388	/*
389	 * Ditto for the idle "swapper" tasks.
390	 */
391	get_online_cpus();
392	for_each_possible_cpu(cpu) {
393		task = idle_task(cpu);
394		if (cpu_online(cpu)) {
395			if (!klp_try_switch_task(task))
396				complete = false;
397		} else if (task->patch_state != klp_target_state) {
398			/* offline idle tasks can be switched immediately */
399			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
400			task->patch_state = klp_target_state;
401		}
402	}
403	put_online_cpus();
404
405	if (!complete) {
406		/*
407		 * Some tasks weren't able to be switched over.  Try again
408		 * later and/or wait for other methods like kernel exit
409		 * switching.
410		 */
411		schedule_delayed_work(&klp_transition_work,
412				      round_jiffies_relative(HZ));
413		return;
414	}
415
416	/* we're done, now cleanup the data structures */
417	klp_complete_transition();
418}
419
420/*
421 * Start the transition to the specified target patch state so tasks can begin
422 * switching to it.
423 */
424void klp_start_transition(void)
425{
426	struct task_struct *g, *task;
427	unsigned int cpu;
428
429	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
430
431	pr_notice("'%s': starting %s transition\n",
432		  klp_transition_patch->mod->name,
433		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
434
435	/*
436	 * Mark all normal tasks as needing a patch state update.  They'll
437	 * switch either in klp_try_complete_transition() or as they exit the
438	 * kernel.
439	 */
440	read_lock(&tasklist_lock);
441	for_each_process_thread(g, task)
442		if (task->patch_state != klp_target_state)
443			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
444	read_unlock(&tasklist_lock);
445
446	/*
447	 * Mark all idle tasks as needing a patch state update.  They'll switch
448	 * either in klp_try_complete_transition() or at the idle loop switch
449	 * point.
450	 */
451	for_each_possible_cpu(cpu) {
452		task = idle_task(cpu);
453		if (task->patch_state != klp_target_state)
454			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
455	}
456}
457
458/*
459 * Initialize the global target patch state and all tasks to the initial patch
460 * state, and initialize all function transition states to true in preparation
461 * for patching or unpatching.
462 */
463void klp_init_transition(struct klp_patch *patch, int state)
464{
465	struct task_struct *g, *task;
466	unsigned int cpu;
467	struct klp_object *obj;
468	struct klp_func *func;
469	int initial_state = !state;
470
471	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
472
473	klp_transition_patch = patch;
474
475	/*
476	 * Set the global target patch state which tasks will switch to.  This
477	 * has no effect until the TIF_PATCH_PENDING flags get set later.
478	 */
479	klp_target_state = state;
480
481	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
482		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
483
484	/*
485	 * Initialize all tasks to the initial patch state to prepare them for
486	 * switching to the target state.
487	 */
488	read_lock(&tasklist_lock);
489	for_each_process_thread(g, task) {
490		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
491		task->patch_state = initial_state;
492	}
493	read_unlock(&tasklist_lock);
494
495	/*
496	 * Ditto for the idle "swapper" tasks.
497	 */
498	for_each_possible_cpu(cpu) {
499		task = idle_task(cpu);
500		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
501		task->patch_state = initial_state;
502	}
503
504	/*
505	 * Enforce the order of the task->patch_state initializations and the
506	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
507	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
508	 *
509	 * Also enforce the order of the klp_target_state write and future
510	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
511	 * set a task->patch_state to KLP_UNDEFINED.
512	 */
513	smp_wmb();
514
515	/*
516	 * Set the func transition states so klp_ftrace_handler() will know to
517	 * switch to the transition logic.
518	 *
519	 * When patching, the funcs aren't yet in the func_stack and will be
520	 * made visible to the ftrace handler shortly by the calls to
521	 * klp_patch_object().
522	 *
523	 * When unpatching, the funcs are already in the func_stack and so are
524	 * already visible to the ftrace handler.
525	 */
526	klp_for_each_object(patch, obj)
527		klp_for_each_func(obj, func)
528			func->transition = true;
529}
530
531/*
532 * This function can be called in the middle of an existing transition to
533 * reverse the direction of the target patch state.  This can be done to
534 * effectively cancel an existing enable or disable operation if there are any
535 * tasks which are stuck in the initial patch state.
536 */
537void klp_reverse_transition(void)
538{
539	unsigned int cpu;
540	struct task_struct *g, *task;
541
542	pr_debug("'%s': reversing transition from %s\n",
543		 klp_transition_patch->mod->name,
544		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
545						   "unpatching to patching");
546
547	klp_transition_patch->enabled = !klp_transition_patch->enabled;
548
549	klp_target_state = !klp_target_state;
550
551	/*
552	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
553	 * klp_update_patch_state() running in parallel with
554	 * klp_start_transition().
555	 */
556	read_lock(&tasklist_lock);
557	for_each_process_thread(g, task)
558		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
559	read_unlock(&tasklist_lock);
560
561	for_each_possible_cpu(cpu)
562		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
563
564	/* Let any remaining calls to klp_update_patch_state() complete */
565	klp_synchronize_transition();
566
567	klp_start_transition();
568}
569
570/* Called from copy_process() during fork */
571void klp_copy_process(struct task_struct *child)
572{
573	child->patch_state = current->patch_state;
574
575	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
576}
577
578/*
579 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
580 * Kthreads with TIF_PATCH_PENDING set are woken up. Currently only the admin
581 * can request this action.
582 */
583void klp_send_signals(void)
584{
585	struct task_struct *g, *task;
586
587	pr_notice("signaling remaining tasks\n");
588
589	read_lock(&tasklist_lock);
590	for_each_process_thread(g, task) {
591		if (!klp_patch_pending(task))
592			continue;
593
594		/*
595		 * There is a small race here. We could see TIF_PATCH_PENDING
596		 * set and decide to wake up a kthread or send a fake signal.
597		 * Meanwhile the task could migrate itself and the action
598		 * would be meaningless. It is not serious though.
599		 */
600		if (task->flags & PF_KTHREAD) {
601			/*
602			 * Wake up a kthread which sleeps interruptibly and
603			 * still has not been migrated.
604			 */
605			wake_up_state(task, TASK_INTERRUPTIBLE);
606		} else {
607			/*
608			 * Send fake signal to all non-kthread tasks which are
609			 * still not migrated.
610			 */
611			spin_lock_irq(&task->sighand->siglock);
612			signal_wake_up(task, 0);
613			spin_unlock_irq(&task->sighand->siglock);
614		}
615	}
616	read_unlock(&tasklist_lock);
617}
618
619/*
620 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
621 * existing transition to finish.
622 *
623 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
624 * 'current'. This is not the case here and the consistency model could be
625 * broken. The administrator, who is the only one who can execute
626 * klp_force_transition(), has to be aware of this.
627 */
628void klp_force_transition(void)
629{
630	struct task_struct *g, *task;
631	unsigned int cpu;
632
633	pr_warn("forcing remaining tasks to the patched state\n");
634
635	read_lock(&tasklist_lock);
636	for_each_process_thread(g, task)
637		klp_update_patch_state(task);
638	read_unlock(&tasklist_lock);
639
640	for_each_possible_cpu(cpu)
641		klp_update_patch_state(idle_task(cpu));
642
643	klp_forced = true;
644}
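
For comparison with the v6.2 listing, patch modules in the v4.17 era had to register before enabling and could only be unregistered (and thus unloaded) after an unpatching transition completed; a forced patch keeps its module pinned, as the klp_forced check around module_put() above shows. The init/exit sketch below follows the style of the sample module of that era and reuses the funcs/objs/patch definitions from the earlier sketch; the names and error handling are illustrative assumptions, not taken from transition.c.

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	/* Only allowed once the unpatching transition has completed. */
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");

Note also the difference in how stragglers are handled: in v4.17, klp_send_signals() is an exported helper triggered on the administrator's request (alongside the force operation), whereas in v6.2 it is static and invoked automatically every SIGNALS_TIMEOUT retries of klp_try_complete_transition().
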