v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Kernel thread helper functions.
   3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   4 *   Copyright (C) 2009 Red Hat, Inc.
   5 *
   6 * Creation is done via kthreadd, so that we get a clean environment
   7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   8 * etc.).
   9 */
  10#include <uapi/linux/sched/types.h>
  11#include <linux/mm.h>
  12#include <linux/mmu_context.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/task.h>
  16#include <linux/kthread.h>
  17#include <linux/completion.h>
  18#include <linux/err.h>
  19#include <linux/cgroup.h>
  20#include <linux/cpuset.h>
  21#include <linux/unistd.h>
  22#include <linux/file.h>
  23#include <linux/export.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/freezer.h>
  27#include <linux/ptrace.h>
  28#include <linux/uaccess.h>
  29#include <linux/numa.h>
  30#include <linux/sched/isolation.h>
  31#include <trace/events/sched.h>
  32
  33
  34static DEFINE_SPINLOCK(kthread_create_lock);
  35static LIST_HEAD(kthread_create_list);
  36struct task_struct *kthreadd_task;
  37
  38struct kthread_create_info
  39{
  40	/* Information passed to kthread() from kthreadd. */
  41	int (*threadfn)(void *data);
  42	void *data;
  43	int node;
  44
  45	/* Result passed back to kthread_create() from kthreadd. */
  46	struct task_struct *result;
  47	struct completion *done;
  48
  49	struct list_head list;
  50};
  51
  52struct kthread {
  53	unsigned long flags;
  54	unsigned int cpu;
  55	int (*threadfn)(void *);
  56	void *data;
  57	mm_segment_t oldfs;
  58	struct completion parked;
  59	struct completion exited;
  60#ifdef CONFIG_BLK_CGROUP
  61	struct cgroup_subsys_state *blkcg_css;
  62#endif
  63};
  64
  65enum KTHREAD_BITS {
  66	KTHREAD_IS_PER_CPU = 0,
  67	KTHREAD_SHOULD_STOP,
  68	KTHREAD_SHOULD_PARK,
  69};
  70
  71static inline struct kthread *to_kthread(struct task_struct *k)
  72{
  73	WARN_ON(!(k->flags & PF_KTHREAD));
  74	return (__force void *)k->set_child_tid;
  75}
  76
  77/*
  78 * Variant of to_kthread() that doesn't assume @p is a kthread.
  79 *
  80 * Per construction; when:
  81 *
  82 *   (p->flags & PF_KTHREAD) && p->set_child_tid
  83 *
  84 * the task is both a kthread and struct kthread is persistent. However
  85 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
  86 * begin_new_exec()).
  87 */
  88static inline struct kthread *__to_kthread(struct task_struct *p)
  89{
  90	void *kthread = (__force void *)p->set_child_tid;
  91	if (kthread && !(p->flags & PF_KTHREAD))
  92		kthread = NULL;
  93	return kthread;
  94}
  95
  96void set_kthread_struct(struct task_struct *p)
  97{
  98	struct kthread *kthread;
  99
 100	if (__to_kthread(p))
 101		return;
 102
 103	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
 104	/*
 105	 * We abuse ->set_child_tid to avoid the new member and because it
 106	 * can't be wrongly copied by copy_process(). We also rely on fact
 107	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
 108	 */
 109	p->set_child_tid = (__force void __user *)kthread;
 110}
 111
 112void free_kthread_struct(struct task_struct *k)
 113{
 114	struct kthread *kthread;
 115
 116	/*
 117	 * Can be NULL if this kthread was created by kernel_thread()
 118	 * or if kmalloc() in kthread() failed.
 119	 */
 120	kthread = to_kthread(k);
 121#ifdef CONFIG_BLK_CGROUP
 122	WARN_ON_ONCE(kthread && kthread->blkcg_css);
 123#endif
 124	kfree(kthread);
 125}
 126
 127/**
 128 * kthread_should_stop - should this kthread return now?
 129 *
 130 * When someone calls kthread_stop() on your kthread, it will be woken
 131 * and this will return true.  You should then return, and your return
 132 * value will be passed through to kthread_stop().
 133 */
 134bool kthread_should_stop(void)
 135{
 136	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 137}
 138EXPORT_SYMBOL(kthread_should_stop);
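/*
 * A minimal usage sketch (illustrative only, not part of this file): the
 * canonical main loop of a kthread that honours kthread_stop().  The names
 * my_poll_fn, struct my_dev and my_dev_poll() are hypothetical.
 */
#if 0
static int my_poll_fn(void *arg)
{
	struct my_dev *dev = arg;

	while (!kthread_should_stop()) {
		my_dev_poll(dev);			/* hypothetical work */
		schedule_timeout_interruptible(HZ);	/* sleep ~1s between polls */
	}
	return 0;	/* passed back to kthread_stop() */
}
#endif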
 139
 140bool __kthread_should_park(struct task_struct *k)
 141{
 142	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
 143}
 144EXPORT_SYMBOL_GPL(__kthread_should_park);
 145
 146/**
 147 * kthread_should_park - should this kthread park now?
 148 *
 149 * When someone calls kthread_park() on your kthread, it will be woken
 150 * and this will return true.  You should then do the necessary
 151 * cleanup and call kthread_parkme()
 152 *
 153 * Similar to kthread_should_stop(), but this keeps the thread alive
 154 * and in a park position. kthread_unpark() "restarts" the thread and
 155 * calls the thread function again.
 156 */
 157bool kthread_should_park(void)
 158{
 159	return __kthread_should_park(current);
 160}
 161EXPORT_SYMBOL_GPL(kthread_should_park);
 162
 163/**
 164 * kthread_freezable_should_stop - should this freezable kthread return now?
 165 * @was_frozen: optional out parameter, indicates whether %current was frozen
 166 *
 167 * kthread_should_stop() for freezable kthreads, which will enter
 168 * refrigerator if necessary.  This function is safe from kthread_stop() /
 169 * freezer deadlock and freezable kthreads should use this function instead
 170 * of calling try_to_freeze() directly.
 171 */
 172bool kthread_freezable_should_stop(bool *was_frozen)
 173{
 174	bool frozen = false;
 175
 176	might_sleep();
 177
 178	if (unlikely(freezing(current)))
 179		frozen = __refrigerator(true);
 180
 181	if (was_frozen)
 182		*was_frozen = frozen;
 183
 184	return kthread_should_stop();
 185}
 186EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
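/*
 * Sketch of a freezable kthread loop (illustrative): set_freezable() clears
 * PF_NOFREEZE so the freezer may freeze this thread, and
 * kthread_freezable_should_stop() is then the safe stop/freeze check.
 * my_flush() is hypothetical.
 */
#if 0
static int my_freezable_fn(void *arg)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("resumed after being frozen\n");
		my_flush(arg);				/* hypothetical work */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif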
 187
 188/**
 189 * kthread_func - return the function specified on kthread creation
 190 * @task: kthread task in question
 191 *
 192 * Returns NULL if the task is not a kthread.
 193 */
 194void *kthread_func(struct task_struct *task)
 195{
 196	struct kthread *kthread = __to_kthread(task);
 197	if (kthread)
 198		return kthread->threadfn;
 199	return NULL;
 200}
 201EXPORT_SYMBOL_GPL(kthread_func);
 202
 203/**
 204 * kthread_data - return data value specified on kthread creation
 205 * @task: kthread task in question
 206 *
 207 * Return the data value specified when kthread @task was created.
 208 * The caller is responsible for ensuring the validity of @task when
 209 * calling this function.
 210 */
 211void *kthread_data(struct task_struct *task)
 212{
 213	return to_kthread(task)->data;
 214}
 215EXPORT_SYMBOL_GPL(kthread_data);
 216
 217/**
 218 * kthread_probe_data - speculative version of kthread_data()
 219 * @task: possible kthread task in question
 220 *
 221 * @task could be a kthread task.  Return the data value specified when it
 222 * was created if accessible.  If @task isn't a kthread task or its data is
 223 * inaccessible for any reason, %NULL is returned.  This function requires
 224 * that @task itself is safe to dereference.
 225 */
 226void *kthread_probe_data(struct task_struct *task)
 227{
 228	struct kthread *kthread = __to_kthread(task);
 229	void *data = NULL;
 230
 231	if (kthread)
 232		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 233	return data;
 234}
 235
 236static void __kthread_parkme(struct kthread *self)
 237{
 238	for (;;) {
 239		/*
 240		 * TASK_PARKED is a special state; we must serialize against
 241		 * possible pending wakeups to avoid store-store collisions on
 242		 * task->state.
 243		 *
 244		 * Such a collision might possibly result in the task state
 245		 * changing from TASK_PARKED and us failing the
 246		 * wait_task_inactive() in kthread_park().
 247		 */
 248		set_special_state(TASK_PARKED);
 249		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 250			break;
 251
 252		/*
 253		 * Thread is going to call schedule(), do not preempt it,
 254		 * or the caller of kthread_park() may spend more time in
 255		 * wait_task_inactive().
 256		 */
 257		preempt_disable();
 258		complete(&self->parked);
 259		schedule_preempt_disabled();
 260		preempt_enable();
 261	}
 262	__set_current_state(TASK_RUNNING);
 263}
 264
 265void kthread_parkme(void)
 266{
 267	__kthread_parkme(to_kthread(current));
 268}
 269EXPORT_SYMBOL_GPL(kthread_parkme);
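/*
 * Sketch of the thread-side park handshake (illustrative): the loop checks
 * kthread_should_park() and calls kthread_parkme(), which sleeps in
 * TASK_PARKED until kthread_unpark() is called.  my_step() is hypothetical.
 */
#if 0
static int my_percpu_fn(void *arg)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		else
			my_step(arg);		/* hypothetical per-CPU work */
	}
	return 0;
}
#endif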
 270
 271static int kthread(void *_create)
 272{
 273	/* Copy data: it's on kthread's stack */
 274	struct kthread_create_info *create = _create;
 275	int (*threadfn)(void *data) = create->threadfn;
 276	void *data = create->data;
 277	struct completion *done;
 278	struct kthread *self;
 279	int ret;
 280
 281	set_kthread_struct(current);
 282	self = to_kthread(current);
 283
 284	/* If user was SIGKILLed, I release the structure. */
 285	done = xchg(&create->done, NULL);
 286	if (!done) {
 287		kfree(create);
 288		do_exit(-EINTR);
 289	}
 290
 291	if (!self) {
 292		create->result = ERR_PTR(-ENOMEM);
 293		complete(done);
 294		do_exit(-ENOMEM);
 295	}
 296
 297	self->threadfn = threadfn;
 298	self->data = data;
 299	init_completion(&self->exited);
 300	init_completion(&self->parked);
 301	current->vfork_done = &self->exited;
 302
 303	/* OK, tell user we're spawned, wait for stop or wakeup */
 304	__set_current_state(TASK_UNINTERRUPTIBLE);
 305	create->result = current;
 306	/*
 307	 * Thread is going to call schedule(), do not preempt it,
 308	 * or the creator may spend more time in wait_task_inactive().
 309	 */
 310	preempt_disable();
 311	complete(done);
 312	schedule_preempt_disabled();
 313	preempt_enable();
 314
 315	ret = -EINTR;
 316	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 317		cgroup_kthread_ready();
 318		__kthread_parkme(self);
 319		ret = threadfn(data);
 320	}
 321	do_exit(ret);
 322}
 323
 324/* called from kernel_clone() to get node information for the task about to be created */
 325int tsk_fork_get_node(struct task_struct *tsk)
 326{
 327#ifdef CONFIG_NUMA
 328	if (tsk == kthreadd_task)
 329		return tsk->pref_node_fork;
 330#endif
 331	return NUMA_NO_NODE;
 332}
 333
 334static void create_kthread(struct kthread_create_info *create)
 335{
 336	int pid;
 337
 338#ifdef CONFIG_NUMA
 339	current->pref_node_fork = create->node;
 340#endif
 341	/* We want our own signal handler (we take no signals by default). */
 342	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
 343	if (pid < 0) {
 344		/* If user was SIGKILLed, I release the structure. */
 345		struct completion *done = xchg(&create->done, NULL);
 346
 347		if (!done) {
 348			kfree(create);
 349			return;
 350		}
 351		create->result = ERR_PTR(pid);
 352		complete(done);
 353	}
 354}
 355
 356static __printf(4, 0)
 357struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 358						    void *data, int node,
 359						    const char namefmt[],
 360						    va_list args)
 361{
 362	DECLARE_COMPLETION_ONSTACK(done);
 363	struct task_struct *task;
 364	struct kthread_create_info *create = kmalloc(sizeof(*create),
 365						     GFP_KERNEL);
 366
 367	if (!create)
 368		return ERR_PTR(-ENOMEM);
 369	create->threadfn = threadfn;
 370	create->data = data;
 371	create->node = node;
 372	create->done = &done;
 373
 374	spin_lock(&kthread_create_lock);
 375	list_add_tail(&create->list, &kthread_create_list);
 376	spin_unlock(&kthread_create_lock);
 377
 378	wake_up_process(kthreadd_task);
 379	/*
 380	 * Wait for completion in killable state, for I might be chosen by
 381	 * the OOM killer while kthreadd is trying to allocate memory for
 382	 * new kernel thread.
 383	 */
 384	if (unlikely(wait_for_completion_killable(&done))) {
 385		/*
 386		 * If I was SIGKILLed before kthreadd (or new kernel thread)
 387		 * calls complete(), leave the cleanup of this structure to
 388		 * that thread.
 389		 */
 390		if (xchg(&create->done, NULL))
 391			return ERR_PTR(-EINTR);
 392		/*
 393		 * kthreadd (or new kernel thread) will call complete()
 394		 * shortly.
 395		 */
 396		wait_for_completion(&done);
 397	}
 398	task = create->result;
 399	if (!IS_ERR(task)) {
 400		static const struct sched_param param = { .sched_priority = 0 };
 401		char name[TASK_COMM_LEN];
 402
 403		/*
 404		 * task is already visible to other tasks, so updating
 405		 * COMM must be protected.
 406		 */
 407		vsnprintf(name, sizeof(name), namefmt, args);
 408		set_task_comm(task, name);
 409		/*
 410		 * root may have changed our (kthreadd's) priority or CPU mask.
 411		 * The kernel thread should not inherit these properties.
 412		 */
 413		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
 414		set_cpus_allowed_ptr(task,
 415				     housekeeping_cpumask(HK_FLAG_KTHREAD));
 416	}
 417	kfree(create);
 418	return task;
 419}
 420
 421/**
 422 * kthread_create_on_node - create a kthread.
 423 * @threadfn: the function to run until signal_pending(current).
 424 * @data: data ptr for @threadfn.
 425 * @node: task and thread structures for the thread are allocated on this node
 426 * @namefmt: printf-style name for the thread.
 427 *
 428 * Description: This helper function creates and names a kernel
 429 * thread.  The thread will be stopped: use wake_up_process() to start
 430 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 431 * is affine to all CPUs.
 432 *
 433 * If the thread is going to be bound to a particular cpu, give its node
 434 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 435 * When woken, the thread will run @threadfn() with @data as its
 436 * argument. @threadfn() can either call do_exit() directly if it is a
 437 * standalone thread for which no one will call kthread_stop(), or
 438 * return when 'kthread_should_stop()' is true (which means
 439 * kthread_stop() has been called).  The return value should be zero
 440 * or a negative error number; it will be passed to kthread_stop().
 441 *
 442 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 443 */
 444struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 445					   void *data, int node,
 446					   const char namefmt[],
 447					   ...)
 448{
 449	struct task_struct *task;
 450	va_list args;
 451
 452	va_start(args, namefmt);
 453	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 454	va_end(args);
 455
 456	return task;
 457}
 458EXPORT_SYMBOL(kthread_create_on_node);
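/*
 * Usage sketch (illustrative): create a named thread with no particular NUMA
 * preference and start it.  Errors follow the ERR_PTR() convention described
 * above; my_poll_fn, struct my_dev and the thread name are hypothetical.
 */
#if 0
static struct task_struct *my_start_poller(struct my_dev *dev, int id)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(my_poll_fn, dev, NUMA_NO_NODE,
				     "my_poller/%d", id);
	if (IS_ERR(tsk))
		return tsk;
	wake_up_process(tsk);	/* the thread starts running my_poll_fn(dev) */
	return tsk;
}
#endif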
 459
 460static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
 461{
 462	unsigned long flags;
 463
 464	if (!wait_task_inactive(p, state)) {
 465		WARN_ON(1);
 466		return;
 467	}
 468
 469	/* It's safe because the task is inactive. */
 470	raw_spin_lock_irqsave(&p->pi_lock, flags);
 471	do_set_cpus_allowed(p, mask);
 472	p->flags |= PF_NO_SETAFFINITY;
 473	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 474}
 475
 476static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
 477{
 478	__kthread_bind_mask(p, cpumask_of(cpu), state);
 479}
 480
 481void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 482{
 483	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 484}
 485
 486/**
 487 * kthread_bind - bind a just-created kthread to a cpu.
 488 * @p: thread created by kthread_create().
 489 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 490 *
 491 * Description: This function is equivalent to set_cpus_allowed(),
 492 * except that @cpu doesn't need to be online, and the thread must be
 493 * stopped (i.e., just returned from kthread_create()).
 494 */
 495void kthread_bind(struct task_struct *p, unsigned int cpu)
 496{
 497	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 498}
 499EXPORT_SYMBOL(kthread_bind);
 500
 501/**
 502 * kthread_create_on_cpu - Create a cpu bound kthread
 503 * @threadfn: the function to run until signal_pending(current).
 504 * @data: data ptr for @threadfn.
 505 * @cpu: The cpu to which the thread should be bound.
 506 * @namefmt: printf-style name for the thread. Format is restricted
 507 *	     to "name.*%u". Code fills in cpu number.
 508 *
 509 * Description: This helper function creates and names a kernel thread.
 510 */
 511struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 512					  void *data, unsigned int cpu,
 513					  const char *namefmt)
 514{
 515	struct task_struct *p;
 516
 517	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 518				   cpu);
 519	if (IS_ERR(p))
 520		return p;
 521	kthread_bind(p, cpu);
 522	/* CPU hotplug needs to bind once again when unparking the thread. */
 523	to_kthread(p)->cpu = cpu;
 524	return p;
 525}
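/*
 * Sketch (illustrative): a CPU-bound helper thread.  The namefmt must
 * contain "%u", which kthread_create_on_cpu() fills in with the CPU number;
 * my_cpu_fn is hypothetical.
 */
#if 0
static int my_start_cpu_helper(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(my_cpu_fn, NULL, cpu, "my_helper/%u");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);
	return 0;
}
#endif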
 526
 527void kthread_set_per_cpu(struct task_struct *k, int cpu)
 528{
 529	struct kthread *kthread = to_kthread(k);
 530	if (!kthread)
 531		return;
 532
 533	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
 534
 535	if (cpu < 0) {
 536		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 537		return;
 538	}
 539
 540	kthread->cpu = cpu;
 541	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 542}
 543
 544bool kthread_is_per_cpu(struct task_struct *p)
 545{
 546	struct kthread *kthread = __to_kthread(p);
 547	if (!kthread)
 548		return false;
 549
 550	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 551}
 552
 553/**
 554 * kthread_unpark - unpark a thread created by kthread_create().
 555 * @k:		thread created by kthread_create().
 556 *
 557 * Sets kthread_should_park() for @k to return false, wakes it, and
 558 * waits for it to return. If the thread is marked percpu then it is
 559 * bound to the cpu again.
 560 */
 561void kthread_unpark(struct task_struct *k)
 562{
 563	struct kthread *kthread = to_kthread(k);
 564
 565	/*
 566	 * Newly created kthread was parked when the CPU was offline.
 567	 * The binding was lost and we need to set it again.
 568	 */
 569	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 570		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 571
 572	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 573	/*
 574	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 575	 */
 576	wake_up_state(k, TASK_PARKED);
 577}
 578EXPORT_SYMBOL_GPL(kthread_unpark);
 579
 580/**
 581 * kthread_park - park a thread created by kthread_create().
 582 * @k: thread created by kthread_create().
 583 *
 584 * Sets kthread_should_park() for @k to return true, wakes it, and
 585 * waits for it to return. This can also be called after kthread_create()
 586 * instead of calling wake_up_process(): the thread will park without
 587 * calling threadfn().
 588 *
 589 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 590 * If called by the kthread itself just the park bit is set.
 591 */
 592int kthread_park(struct task_struct *k)
 593{
 594	struct kthread *kthread = to_kthread(k);
 595
 596	if (WARN_ON(k->flags & PF_EXITING))
 597		return -ENOSYS;
 598
 599	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
 600		return -EBUSY;
 601
 602	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 603	if (k != current) {
 604		wake_up_process(k);
 605		/*
 606		 * Wait for __kthread_parkme() to complete(), this means we
 607		 * _will_ have TASK_PARKED and are about to call schedule().
 608		 */
 609		wait_for_completion(&kthread->parked);
 610		/*
 611		 * Now wait for that schedule() to complete and the task to
 612		 * get scheduled out.
 613		 */
 614		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
 615	}
 616
 617	return 0;
 618}
 619EXPORT_SYMBOL_GPL(kthread_park);
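/*
 * Caller-side sketch of the park protocol (illustrative): park the thread
 * while reconfiguring, then unpark it.  Pairs with the
 * kthread_should_park()/kthread_parkme() loop sketched earlier;
 * my_reconfigure() is hypothetical.
 */
#if 0
static int my_quiesce_and_reconfigure(struct task_struct *tsk)
{
	int err = kthread_park(tsk);	/* thread sits in TASK_PARKED on success */

	if (err)
		return err;
	my_reconfigure();		/* safe: the thread is not running threadfn */
	kthread_unpark(tsk);
	return 0;
}
#endif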
 620
 621/**
 622 * kthread_stop - stop a thread created by kthread_create().
 623 * @k: thread created by kthread_create().
 624 *
 625 * Sets kthread_should_stop() for @k to return true, wakes it, and
 626 * waits for it to exit. This can also be called after kthread_create()
 627 * instead of calling wake_up_process(): the thread will exit without
 628 * calling threadfn().
 629 *
 630 * If threadfn() may call do_exit() itself, the caller must ensure
 631 * task_struct can't go away.
 632 *
 633 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 634 * was never called.
 635 */
 636int kthread_stop(struct task_struct *k)
 637{
 638	struct kthread *kthread;
 639	int ret;
 640
 641	trace_sched_kthread_stop(k);
 642
 643	get_task_struct(k);
 644	kthread = to_kthread(k);
 645	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 646	kthread_unpark(k);
 647	wake_up_process(k);
 648	wait_for_completion(&kthread->exited);
 649	ret = k->exit_code;
 650	put_task_struct(k);
 651
 652	trace_sched_kthread_stop_ret(ret);
 653	return ret;
 654}
 655EXPORT_SYMBOL(kthread_stop);
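/*
 * Lifecycle sketch (illustrative) using the kthread_run() macro from
 * <linux/kthread.h>, which combines kthread_create() and wake_up_process().
 * The thread is later torn down with kthread_stop(), whose return value is
 * whatever my_poll_fn() returned (or -EINTR if it never ran).
 */
#if 0
	struct task_struct *tsk;
	int ret;

	tsk = kthread_run(my_poll_fn, dev, "my_poller");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	/* ... device operates, my_poll_fn() loops ... */
	ret = kthread_stop(tsk);	/* wakes the thread and waits for it to exit */
#endif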
 656
 657int kthreadd(void *unused)
 658{
 659	struct task_struct *tsk = current;
 660
 661	/* Setup a clean context for our children to inherit. */
 662	set_task_comm(tsk, "kthreadd");
 663	ignore_signals(tsk);
 664	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
 665	set_mems_allowed(node_states[N_MEMORY]);
 666
 667	current->flags |= PF_NOFREEZE;
 668	cgroup_init_kthreadd();
 669
 670	for (;;) {
 671		set_current_state(TASK_INTERRUPTIBLE);
 672		if (list_empty(&kthread_create_list))
 673			schedule();
 674		__set_current_state(TASK_RUNNING);
 675
 676		spin_lock(&kthread_create_lock);
 677		while (!list_empty(&kthread_create_list)) {
 678			struct kthread_create_info *create;
 679
 680			create = list_entry(kthread_create_list.next,
 681					    struct kthread_create_info, list);
 682			list_del_init(&create->list);
 683			spin_unlock(&kthread_create_lock);
 684
 685			create_kthread(create);
 686
 687			spin_lock(&kthread_create_lock);
 688		}
 689		spin_unlock(&kthread_create_lock);
 690	}
 691
 692	return 0;
 693}
 694
 695void __kthread_init_worker(struct kthread_worker *worker,
 696				const char *name,
 697				struct lock_class_key *key)
 698{
 699	memset(worker, 0, sizeof(struct kthread_worker));
 700	raw_spin_lock_init(&worker->lock);
 701	lockdep_set_class_and_name(&worker->lock, key, name);
 702	INIT_LIST_HEAD(&worker->work_list);
 703	INIT_LIST_HEAD(&worker->delayed_work_list);
 704}
 705EXPORT_SYMBOL_GPL(__kthread_init_worker);
 706
 707/**
 708 * kthread_worker_fn - kthread function to process kthread_worker
 709 * @worker_ptr: pointer to initialized kthread_worker
 710 *
 711 * This function implements the main cycle of kthread worker. It processes
 712 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 713 * is empty.
 714 *
 715 * The works are not allowed to hold any locks or keep preemption or interrupts
 716 * disabled when they finish. A safe point for freezing is defined when one work
 717 * finishes and before a new one is started.
 718 *
 719 * Also the works must not be handled by more than one worker at the same time,
 720 * see also kthread_queue_work().
 721 */
 722int kthread_worker_fn(void *worker_ptr)
 723{
 724	struct kthread_worker *worker = worker_ptr;
 725	struct kthread_work *work;
 726
 727	/*
 728	 * FIXME: Update the check and remove the assignment when all kthread
 729	 * worker users are created using kthread_create_worker*() functions.
 730	 */
 731	WARN_ON(worker->task && worker->task != current);
 732	worker->task = current;
 733
 734	if (worker->flags & KTW_FREEZABLE)
 735		set_freezable();
 736
 737repeat:
 738	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 739
 740	if (kthread_should_stop()) {
 741		__set_current_state(TASK_RUNNING);
 742		raw_spin_lock_irq(&worker->lock);
 743		worker->task = NULL;
 744		raw_spin_unlock_irq(&worker->lock);
 745		return 0;
 746	}
 747
 748	work = NULL;
 749	raw_spin_lock_irq(&worker->lock);
 750	if (!list_empty(&worker->work_list)) {
 751		work = list_first_entry(&worker->work_list,
 752					struct kthread_work, node);
 753		list_del_init(&work->node);
 754	}
 755	worker->current_work = work;
 756	raw_spin_unlock_irq(&worker->lock);
 757
 758	if (work) {
 759		kthread_work_func_t func = work->func;
 760		__set_current_state(TASK_RUNNING);
 761		trace_sched_kthread_work_execute_start(work);
 762		work->func(work);
 763		/*
 764		 * Avoid dereferencing work after this point.  The trace
 765		 * event only cares about the address.
 766		 */
 767		trace_sched_kthread_work_execute_end(work, func);
 768	} else if (!freezing(current))
 769		schedule();
 770
 771	try_to_freeze();
 772	cond_resched();
 773	goto repeat;
 774}
 775EXPORT_SYMBOL_GPL(kthread_worker_fn);
 776
 777static __printf(3, 0) struct kthread_worker *
 778__kthread_create_worker(int cpu, unsigned int flags,
 779			const char namefmt[], va_list args)
 780{
 781	struct kthread_worker *worker;
 782	struct task_struct *task;
 783	int node = NUMA_NO_NODE;
 784
 785	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 786	if (!worker)
 787		return ERR_PTR(-ENOMEM);
 788
 789	kthread_init_worker(worker);
 790
 791	if (cpu >= 0)
 792		node = cpu_to_node(cpu);
 793
 794	task = __kthread_create_on_node(kthread_worker_fn, worker,
 795						node, namefmt, args);
 796	if (IS_ERR(task))
 797		goto fail_task;
 798
 799	if (cpu >= 0)
 800		kthread_bind(task, cpu);
 801
 802	worker->flags = flags;
 803	worker->task = task;
 804	wake_up_process(task);
 805	return worker;
 806
 807fail_task:
 808	kfree(worker);
 809	return ERR_CAST(task);
 810}
 811
 812/**
 813 * kthread_create_worker - create a kthread worker
 814 * @flags: flags modifying the default behavior of the worker
 815 * @namefmt: printf-style name for the kthread worker (task).
 816 *
 817 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 818 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 819 * when the worker was SIGKILLed.
 820 */
 821struct kthread_worker *
 822kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 823{
 824	struct kthread_worker *worker;
 825	va_list args;
 826
 827	va_start(args, namefmt);
 828	worker = __kthread_create_worker(-1, flags, namefmt, args);
 829	va_end(args);
 830
 831	return worker;
 832}
 833EXPORT_SYMBOL(kthread_create_worker);
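/*
 * Sketch of the kthread_worker API (illustrative): create a dedicated worker,
 * queue a work item onto it, and tear everything down.  kthread_init_work()
 * is declared in <linux/kthread.h>; struct my_ctx, my_work_fn() and
 * my_process() are hypothetical.
 */
#if 0
struct my_ctx {
	struct kthread_worker *worker;
	struct kthread_work work;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	my_process(ctx);			/* hypothetical payload */
}

static int my_init(struct my_ctx *ctx)
{
	ctx->worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(ctx->worker))
		return PTR_ERR(ctx->worker);

	kthread_init_work(&ctx->work, my_work_fn);
	kthread_queue_work(ctx->worker, &ctx->work);
	return 0;
}

static void my_fini(struct my_ctx *ctx)
{
	kthread_cancel_work_sync(&ctx->work);
	kthread_destroy_worker(ctx->worker);	/* flushes and stops the worker */
}
#endif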
 834
 835/**
 836 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 837 *	to a given CPU and the associated NUMA node.
 838 * @cpu: CPU number
 839 * @flags: flags modifying the default behavior of the worker
 840 * @namefmt: printf-style name for the kthread worker (task).
 841 *
 842 * Use a valid CPU number if you want to bind the kthread worker
 843 * to the given CPU and the associated NUMA node.
 844 *
 845 * A good practice is to include the cpu number in the worker name as well.
 846 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 847 *
 848 * CPU hotplug:
 849 * The kthread worker API is simple and generic. It just provides a way
 850 * to create, use, and destroy workers.
 851 *
 852 * It is up to the API user how to handle CPU hotplug. They have to decide
 853 * how to handle pending work items, prevent queuing new ones, and
 854 * restore the functionality when the CPU goes off and on. There are a
 855 * few catches:
 856 *
 857 *    - CPU affinity gets lost when it is scheduled on an offline CPU.
 858 *
 859 *    - The worker might not exist if the CPU was offline when the user
 860 *      created the workers.
 861 *
 862 * Good practice is to implement two CPU hotplug callbacks and to
 863 * destroy/create the worker when the CPU goes down/up.
 864 *
 865 * Return:
 866 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 867 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 868 * when the worker was SIGKILLed.
 869 */
 870struct kthread_worker *
 871kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 872			     const char namefmt[], ...)
 873{
 874	struct kthread_worker *worker;
 875	va_list args;
 876
 877	va_start(args, namefmt);
 878	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 879	va_end(args);
 880
 881	return worker;
 882}
 883EXPORT_SYMBOL(kthread_create_worker_on_cpu);
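/*
 * Sketch of the CPU hotplug handling suggested above (illustrative): the
 * per-CPU worker is created in the online callback and destroyed in the
 * offline callback.  The cpuhp callback signatures come from
 * <linux/cpuhotplug.h>; my_worker and the other names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct kthread_worker *, my_worker);

static int my_cpu_online(unsigned int cpu)
{
	struct kthread_worker *w;

	w = kthread_create_worker_on_cpu(cpu, 0, "my_helper/%d", cpu);
	if (IS_ERR(w))
		return PTR_ERR(w);
	per_cpu(my_worker, cpu) = w;
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	kthread_destroy_worker(per_cpu(my_worker, cpu));
	per_cpu(my_worker, cpu) = NULL;
	return 0;
}
#endif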
 884
 885/*
 886 * Returns true when the work could not be queued at the moment.
 887 * It happens when it is already pending in a worker list
 888 * or when it is being cancelled.
 889 */
 890static inline bool queuing_blocked(struct kthread_worker *worker,
 891				   struct kthread_work *work)
 892{
 893	lockdep_assert_held(&worker->lock);
 894
 895	return !list_empty(&work->node) || work->canceling;
 896}
 897
 898static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 899					     struct kthread_work *work)
 900{
 901	lockdep_assert_held(&worker->lock);
 902	WARN_ON_ONCE(!list_empty(&work->node));
 903	/* Do not use a work with >1 worker, see kthread_queue_work() */
 904	WARN_ON_ONCE(work->worker && work->worker != worker);
 905}
 906
 907/* insert @work before @pos in @worker */
 908static void kthread_insert_work(struct kthread_worker *worker,
 909				struct kthread_work *work,
 910				struct list_head *pos)
 911{
 912	kthread_insert_work_sanity_check(worker, work);
 913
 914	trace_sched_kthread_work_queue_work(worker, work);
 915
 916	list_add_tail(&work->node, pos);
 917	work->worker = worker;
 918	if (!worker->current_work && likely(worker->task))
 919		wake_up_process(worker->task);
 920}
 921
 922/**
 923 * kthread_queue_work - queue a kthread_work
 924 * @worker: target kthread_worker
 925 * @work: kthread_work to queue
 926 *
 927 * Queue @work to the work processor @worker for async execution.  @worker
 928 * must have been created with kthread_create_worker().  Returns %true
 929 * if @work was successfully queued, %false if it was already pending.
 930 *
 931 * Reinitialize the work if it needs to be used by another worker.
 932 * For example, when the worker was stopped and started again.
 933 */
 934bool kthread_queue_work(struct kthread_worker *worker,
 935			struct kthread_work *work)
 936{
 937	bool ret = false;
 938	unsigned long flags;
 939
 940	raw_spin_lock_irqsave(&worker->lock, flags);
 941	if (!queuing_blocked(worker, work)) {
 942		kthread_insert_work(worker, work, &worker->work_list);
 943		ret = true;
 944	}
 945	raw_spin_unlock_irqrestore(&worker->lock, flags);
 946	return ret;
 947}
 948EXPORT_SYMBOL_GPL(kthread_queue_work);
 949
 950/**
 951 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 952 *	delayed work when the timer expires.
 953 * @t: pointer to the expired timer
 954 *
 955 * The format of the function is defined by struct timer_list.
 956 * It is expected to be called from an irq-safe timer with interrupts already off.
 957 */
 958void kthread_delayed_work_timer_fn(struct timer_list *t)
 959{
 960	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 961	struct kthread_work *work = &dwork->work;
 962	struct kthread_worker *worker = work->worker;
 963	unsigned long flags;
 964
 965	/*
 966	 * This might happen when a pending work is reinitialized.
 967	 * It means that it is being used the wrong way.
 968	 */
 969	if (WARN_ON_ONCE(!worker))
 970		return;
 971
 972	raw_spin_lock_irqsave(&worker->lock, flags);
 973	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 974	WARN_ON_ONCE(work->worker != worker);
 975
 976	/* Move the work from worker->delayed_work_list. */
 977	WARN_ON_ONCE(list_empty(&work->node));
 978	list_del_init(&work->node);
 979	if (!work->canceling)
 980		kthread_insert_work(worker, work, &worker->work_list);
 981
 982	raw_spin_unlock_irqrestore(&worker->lock, flags);
 983}
 984EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 985
 986static void __kthread_queue_delayed_work(struct kthread_worker *worker,
 987					 struct kthread_delayed_work *dwork,
 988					 unsigned long delay)
 989{
 990	struct timer_list *timer = &dwork->timer;
 991	struct kthread_work *work = &dwork->work;
 992
 993	WARN_ON_FUNCTION_MISMATCH(timer->function,
 994				  kthread_delayed_work_timer_fn);
 995
 996	/*
 997	 * If @delay is 0, queue @dwork->work immediately.  This is for
 998	 * both optimization and correctness.  The earliest @timer can
 999	 * expire is on the closest next tick and delayed_work users depend
1000	 * on that there's no such delay when @delay is 0.
1001	 */
1002	if (!delay) {
1003		kthread_insert_work(worker, work, &worker->work_list);
1004		return;
1005	}
1006
1007	/* Be paranoid and try to detect possible races already now. */
1008	kthread_insert_work_sanity_check(worker, work);
1009
1010	list_add(&work->node, &worker->delayed_work_list);
1011	work->worker = worker;
1012	timer->expires = jiffies + delay;
1013	add_timer(timer);
1014}
1015
1016/**
1017 * kthread_queue_delayed_work - queue the associated kthread work
1018 *	after a delay.
1019 * @worker: target kthread_worker
1020 * @dwork: kthread_delayed_work to queue
1021 * @delay: number of jiffies to wait before queuing
1022 *
1023 * If the work has not been pending, it starts a timer that will queue
1024 * the work after the given @delay. If @delay is zero, it queues the
1025 * work immediately.
1026 *
1027 * Return: %false if the @work has already been pending. It means that
1028 * either the timer was running or the work was queued. It returns %true
1029 * otherwise.
1030 */
1031bool kthread_queue_delayed_work(struct kthread_worker *worker,
1032				struct kthread_delayed_work *dwork,
1033				unsigned long delay)
1034{
1035	struct kthread_work *work = &dwork->work;
1036	unsigned long flags;
1037	bool ret = false;
1038
1039	raw_spin_lock_irqsave(&worker->lock, flags);
1040
1041	if (!queuing_blocked(worker, work)) {
1042		__kthread_queue_delayed_work(worker, dwork, delay);
1043		ret = true;
1044	}
1045
1046	raw_spin_unlock_irqrestore(&worker->lock, flags);
1047	return ret;
1048}
1049EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
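/*
 * Sketch of a delayed work item (illustrative): initialized once with
 * kthread_init_delayed_work() from <linux/kthread.h> and queued to run
 * roughly one second later; my_timeout_fn() is a hypothetical
 * kthread_work_func_t.
 */
#if 0
static struct kthread_delayed_work my_dwork;

static void my_arm_timeout(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&my_dwork, my_timeout_fn);
	kthread_queue_delayed_work(worker, &my_dwork, HZ);
}
#endif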
1050
1051struct kthread_flush_work {
1052	struct kthread_work	work;
1053	struct completion	done;
1054};
1055
1056static void kthread_flush_work_fn(struct kthread_work *work)
1057{
1058	struct kthread_flush_work *fwork =
1059		container_of(work, struct kthread_flush_work, work);
1060	complete(&fwork->done);
1061}
1062
1063/**
1064 * kthread_flush_work - flush a kthread_work
1065 * @work: work to flush
1066 *
1067 * If @work is queued or executing, wait for it to finish execution.
1068 */
1069void kthread_flush_work(struct kthread_work *work)
1070{
1071	struct kthread_flush_work fwork = {
1072		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1073		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1074	};
1075	struct kthread_worker *worker;
1076	bool noop = false;
1077
1078	worker = work->worker;
1079	if (!worker)
1080		return;
1081
1082	raw_spin_lock_irq(&worker->lock);
1083	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1084	WARN_ON_ONCE(work->worker != worker);
1085
1086	if (!list_empty(&work->node))
1087		kthread_insert_work(worker, &fwork.work, work->node.next);
1088	else if (worker->current_work == work)
1089		kthread_insert_work(worker, &fwork.work,
1090				    worker->work_list.next);
1091	else
1092		noop = true;
1093
1094	raw_spin_unlock_irq(&worker->lock);
1095
1096	if (!noop)
1097		wait_for_completion(&fwork.done);
1098}
1099EXPORT_SYMBOL_GPL(kthread_flush_work);
1100
1101/*
1102 * Make sure that the timer is neither set nor running and could
1103 * not manipulate the work list_head any longer.
1104 *
1105 * The function is called under worker->lock. The lock is temporarily
1106 * released but the timer can't be set again in the meantime.
1107 */
1108static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1109					      unsigned long *flags)
1110{
1111	struct kthread_delayed_work *dwork =
1112		container_of(work, struct kthread_delayed_work, work);
1113	struct kthread_worker *worker = work->worker;
1114
1115	/*
1116	 * del_timer_sync() must be called to make sure that the timer
1117	 * callback is not running. The lock must be temporarily released
1118	 * to avoid a deadlock with the callback. In the meantime,
1119	 * any queuing is blocked by setting the canceling counter.
1120	 */
1121	work->canceling++;
1122	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1123	del_timer_sync(&dwork->timer);
1124	raw_spin_lock_irqsave(&worker->lock, *flags);
1125	work->canceling--;
1126}
1127
1128/*
1129 * This function removes the work from the worker queue.
1130 *
1131 * It is called under worker->lock. The caller must make sure that
1132 * the timer used by delayed work is not running, e.g. by calling
1133 * kthread_cancel_delayed_work_timer().
1134 *
1135 * The work might still be in use when this function finishes. See the
1136 * current_work processed by the worker.
1137 *
1138 * Return: %true if @work was pending and successfully canceled,
1139 *	%false if @work was not pending
1140 */
1141static bool __kthread_cancel_work(struct kthread_work *work)
1142{
1143	/*
1144	 * Try to remove the work from a worker list. It might either
1145	 * be from worker->work_list or from worker->delayed_work_list.
1146	 */
1147	if (!list_empty(&work->node)) {
1148		list_del_init(&work->node);
1149		return true;
1150	}
1151
1152	return false;
1153}
1154
1155/**
1156 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1157 * @worker: kthread worker to use
1158 * @dwork: kthread delayed work to queue
1159 * @delay: number of jiffies to wait before queuing
1160 *
1161 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1162 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1163 * @work is guaranteed to be queued immediately.
1164 *
1165 * Return: %false if @dwork was idle and queued, %true otherwise.
1166 *
1167 * A special case is when the work is being canceled in parallel.
1168 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1169 * or yet another kthread_mod_delayed_work() call. We let the other command
1170 * win and return %true here. The return value can be used for reference
1171 * counting and the number of queued works stays the same. Anyway, the caller
1172 * is supposed to synchronize these operations in a reasonable way.
1173 *
1174 * This function is safe to call from any context including IRQ handler.
1175 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1176 * for details.
1177 */
1178bool kthread_mod_delayed_work(struct kthread_worker *worker,
1179			      struct kthread_delayed_work *dwork,
1180			      unsigned long delay)
1181{
1182	struct kthread_work *work = &dwork->work;
1183	unsigned long flags;
1184	int ret;
1185
1186	raw_spin_lock_irqsave(&worker->lock, flags);
1187
1188	/* Do not bother with canceling when never queued. */
1189	if (!work->worker) {
1190		ret = false;
1191		goto fast_queue;
1192	}
1193
1194	/* Work must not be used with >1 worker, see kthread_queue_work() */
1195	WARN_ON_ONCE(work->worker != worker);
1196
1197	/*
1198	 * Temporarily cancel the work but do not fight with another command
1199	 * that is canceling the work as well.
1200	 *
1201	 * It is a bit tricky because of possible races with another
1202	 * mod_delayed_work() and cancel_delayed_work() callers.
1203	 *
1204	 * The timer must be canceled first because worker->lock is released
1205	 * when doing so. But the work can be removed from the queue (list)
1206	 * only when it can be queued again so that the return value can
1207	 * be used for reference counting.
1208	 */
1209	kthread_cancel_delayed_work_timer(work, &flags);
1210	if (work->canceling) {
1211		/* The number of works in the queue does not change. */
1212		ret = true;
1213		goto out;
1214	}
1215	ret = __kthread_cancel_work(work);
1216	ret = __kthread_cancel_work(work);
1217fast_queue:
1218	__kthread_queue_delayed_work(worker, dwork, delay);
1219out:
1220	raw_spin_unlock_irqrestore(&worker->lock, flags);
1221	return ret;
1222}
1223EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
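/*
 * Sketch (illustrative): kthread_mod_delayed_work() used as a watchdog
 * re-arm, pushing the pending timeout out by another second whenever
 * activity is observed; my_dwork and worker are as in the earlier sketch.
 */
#if 0
	kthread_mod_delayed_work(worker, &my_dwork, HZ);
#endif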
1224
1225static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1226{
1227	struct kthread_worker *worker = work->worker;
1228	unsigned long flags;
1229	int ret = false;
1230
1231	if (!worker)
1232		goto out;
1233
1234	raw_spin_lock_irqsave(&worker->lock, flags);
1235	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1236	WARN_ON_ONCE(work->worker != worker);
1237
1238	if (is_dwork)
1239		kthread_cancel_delayed_work_timer(work, &flags);
1240
1241	ret = __kthread_cancel_work(work);
1242
1243	if (worker->current_work != work)
1244		goto out_fast;
1245
1246	/*
1247	 * The work is in progress and we need to wait with the lock released.
1248	 * In the meantime, block any queuing by setting the canceling counter.
1249	 */
1250	work->canceling++;
1251	raw_spin_unlock_irqrestore(&worker->lock, flags);
1252	kthread_flush_work(work);
1253	raw_spin_lock_irqsave(&worker->lock, flags);
1254	work->canceling--;
1255
1256out_fast:
1257	raw_spin_unlock_irqrestore(&worker->lock, flags);
1258out:
1259	return ret;
1260}
1261
1262/**
1263 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1264 * @work: the kthread work to cancel
1265 *
1266 * Cancel @work and wait for its execution to finish.  This function
1267 * can be used even if the work re-queues itself. On return from this
1268 * function, @work is guaranteed to be not pending or executing on any CPU.
1269 *
1270 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1271 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1272 *
1273 * The caller must ensure that the worker on which @work was last
1274 * queued can't be destroyed before this function returns.
1275 *
1276 * Return: %true if @work was pending, %false otherwise.
1277 */
1278bool kthread_cancel_work_sync(struct kthread_work *work)
1279{
1280	return __kthread_cancel_work_sync(work, false);
1281}
1282EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1283
1284/**
1285 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1286 *	wait for it to finish.
1287 * @dwork: the kthread delayed work to cancel
1288 *
1289 * This is kthread_cancel_work_sync() for delayed works.
1290 *
1291 * Return: %true if @dwork was pending, %false otherwise.
1292 */
1293bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1294{
1295	return __kthread_cancel_work_sync(&dwork->work, true);
1296}
1297EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1298
1299/**
1300 * kthread_flush_worker - flush all current works on a kthread_worker
1301 * @worker: worker to flush
1302 *
1303 * Wait until all currently executing or pending works on @worker are
1304 * finished.
1305 */
1306void kthread_flush_worker(struct kthread_worker *worker)
1307{
1308	struct kthread_flush_work fwork = {
1309		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1310		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1311	};
1312
1313	kthread_queue_work(worker, &fwork.work);
1314	wait_for_completion(&fwork.done);
1315}
1316EXPORT_SYMBOL_GPL(kthread_flush_worker);
1317
1318/**
1319 * kthread_destroy_worker - destroy a kthread worker
1320 * @worker: worker to be destroyed
1321 *
1322 * Flush and destroy @worker.  The simple flush is enough because the kthread
1323 * worker API is used only in trivial scenarios.  There are no multi-step state
1324 * machines needed.
1325 */
1326void kthread_destroy_worker(struct kthread_worker *worker)
1327{
1328	struct task_struct *task;
1329
1330	task = worker->task;
1331	if (WARN_ON(!task))
1332		return;
1333
1334	kthread_flush_worker(worker);
1335	kthread_stop(task);
1336	WARN_ON(!list_empty(&worker->work_list));
1337	kfree(worker);
1338}
1339EXPORT_SYMBOL(kthread_destroy_worker);
1340
1341/**
1342 * kthread_use_mm - make the calling kthread operate on an address space
1343 * @mm: address space to operate on
1344 */
1345void kthread_use_mm(struct mm_struct *mm)
1346{
1347	struct mm_struct *active_mm;
1348	struct task_struct *tsk = current;
1349
1350	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1351	WARN_ON_ONCE(tsk->mm);
1352
1353	task_lock(tsk);
1354	/* Hold off tlb flush IPIs while switching mm's */
1355	local_irq_disable();
1356	active_mm = tsk->active_mm;
1357	if (active_mm != mm) {
1358		mmgrab(mm);
1359		tsk->active_mm = mm;
1360	}
1361	tsk->mm = mm;
1362	membarrier_update_current_mm(mm);
1363	switch_mm_irqs_off(active_mm, mm, tsk);
1364	local_irq_enable();
1365	task_unlock(tsk);
1366#ifdef finish_arch_post_lock_switch
1367	finish_arch_post_lock_switch();
1368#endif
1369
1370	/*
1371	 * When a kthread starts operating on an address space, the loop
1372	 * in membarrier_{private,global}_expedited() may not observe
1373	 * that tsk->mm, and not issue an IPI. Membarrier requires a
1374	 * memory barrier after storing to tsk->mm, before accessing
1375	 * user-space memory. A full memory barrier for membarrier
1376	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1377	 * mmdrop(), or explicitly with smp_mb().
1378	 */
1379	if (active_mm != mm)
1380		mmdrop(active_mm);
1381	else
1382		smp_mb();
1383
1384	to_kthread(tsk)->oldfs = force_uaccess_begin();
1385}
1386EXPORT_SYMBOL_GPL(kthread_use_mm);
1387
1388/**
1389 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1390 * @mm: address space to operate on
1391 */
1392void kthread_unuse_mm(struct mm_struct *mm)
1393{
1394	struct task_struct *tsk = current;
1395
1396	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1397	WARN_ON_ONCE(!tsk->mm);
1398
1399	force_uaccess_end(to_kthread(tsk)->oldfs);
1400
1401	task_lock(tsk);
1402	/*
1403	 * When a kthread stops operating on an address space, the loop
1404	 * in membarrier_{private,global}_expedited() may not observe
1405	 * that tsk->mm, and not issue an IPI. Membarrier requires a
1406	 * memory barrier after accessing user-space memory, before
1407	 * clearing tsk->mm.
1408	 */
1409	smp_mb__after_spinlock();
1410	sync_mm_rss(mm);
1411	local_irq_disable();
1412	tsk->mm = NULL;
1413	membarrier_update_current_mm(NULL);
1414	/* active_mm is still 'mm' */
1415	enter_lazy_tlb(mm, tsk);
1416	local_irq_enable();
1417	task_unlock(tsk);
1418}
1419EXPORT_SYMBOL_GPL(kthread_unuse_mm);
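/*
 * Sketch of the kthread_use_mm()/kthread_unuse_mm() bracket (illustrative):
 * a kthread temporarily adopts a user mm so that copy_to_user() operates on
 * that address space.  The mm is assumed to be valid and pinned by the
 * caller (e.g. via mmget()); my_copy_to_user_mm() is hypothetical.
 */
#if 0
static int my_copy_to_user_mm(struct mm_struct *mm, void __user *uptr,
			      const void *buf, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);
	if (copy_to_user(uptr, buf, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);
	return ret;
}
#endif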
1420
1421#ifdef CONFIG_BLK_CGROUP
1422/**
1423 * kthread_associate_blkcg - associate blkcg to current kthread
1424 * @css: the cgroup info
1425 *
1426 * Current thread must be a kthread. The thread is running jobs on behalf of
1427 * other threads. In some cases, we expect the jobs to attach the cgroup info
1428 * of the original threads instead of that of the current thread. This function
1429 * stores the original thread's cgroup info in the current kthread context for later
1430 * retrieval.
1431 */
1432void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1433{
1434	struct kthread *kthread;
1435
1436	if (!(current->flags & PF_KTHREAD))
1437		return;
1438	kthread = to_kthread(current);
1439	if (!kthread)
1440		return;
1441
1442	if (kthread->blkcg_css) {
1443		css_put(kthread->blkcg_css);
1444		kthread->blkcg_css = NULL;
1445	}
1446	if (css) {
1447		css_get(css);
1448		kthread->blkcg_css = css;
1449	}
1450}
1451EXPORT_SYMBOL(kthread_associate_blkcg);
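/*
 * Sketch (illustrative): a kthread submitting I/O on behalf of a user task
 * temporarily adopts that task's blkcg so the I/O is charged to the right
 * cgroup.  The css is assumed to have been looked up and pinned by the
 * submitter; my_submit_bio() is hypothetical.
 */
#if 0
	kthread_associate_blkcg(css);	/* subsequent I/O is accounted to @css */
	my_submit_bio(bio);
	kthread_associate_blkcg(NULL);	/* drop the association */
#endif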
1452
1453/**
1454 * kthread_blkcg - get associated blkcg css of current kthread
1455 *
1456 * Current thread must be a kthread.
1457 */
1458struct cgroup_subsys_state *kthread_blkcg(void)
1459{
1460	struct kthread *kthread;
1461
1462	if (current->flags & PF_KTHREAD) {
1463		kthread = to_kthread(current);
1464		if (kthread)
1465			return kthread->blkcg_css;
1466	}
1467	return NULL;
1468}
1469EXPORT_SYMBOL(kthread_blkcg);
1470#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Kernel thread helper functions.
   3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   4 *   Copyright (C) 2009 Red Hat, Inc.
   5 *
   6 * Creation is done via kthreadd, so that we get a clean environment
   7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   8 * etc.).
   9 */
  10#include <uapi/linux/sched/types.h>
  11#include <linux/mm.h>
  12#include <linux/mmu_context.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/task.h>
  16#include <linux/kthread.h>
  17#include <linux/completion.h>
  18#include <linux/err.h>
  19#include <linux/cgroup.h>
  20#include <linux/cpuset.h>
  21#include <linux/unistd.h>
  22#include <linux/file.h>
  23#include <linux/export.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/freezer.h>
  27#include <linux/ptrace.h>
  28#include <linux/uaccess.h>
  29#include <linux/numa.h>
  30#include <linux/sched/isolation.h>
  31#include <trace/events/sched.h>
  32
  33
  34static DEFINE_SPINLOCK(kthread_create_lock);
  35static LIST_HEAD(kthread_create_list);
  36struct task_struct *kthreadd_task;
  37
  38struct kthread_create_info
  39{
  40	/* Information passed to kthread() from kthreadd. */
  41	int (*threadfn)(void *data);
  42	void *data;
  43	int node;
  44
  45	/* Result passed back to kthread_create() from kthreadd. */
  46	struct task_struct *result;
  47	struct completion *done;
  48
  49	struct list_head list;
  50};
  51
  52struct kthread {
  53	unsigned long flags;
  54	unsigned int cpu;
  55	int (*threadfn)(void *);
  56	void *data;
  57	mm_segment_t oldfs;
  58	struct completion parked;
  59	struct completion exited;
  60#ifdef CONFIG_BLK_CGROUP
  61	struct cgroup_subsys_state *blkcg_css;
  62#endif
  63};
  64
  65enum KTHREAD_BITS {
  66	KTHREAD_IS_PER_CPU = 0,
  67	KTHREAD_SHOULD_STOP,
  68	KTHREAD_SHOULD_PARK,
  69};
  70
  71static inline void set_kthread_struct(void *kthread)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  73	/*
  74	 * We abuse ->set_child_tid to avoid the new member and because it
  75	 * can't be wrongly copied by copy_process(). We also rely on fact
  76	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
  77	 */
  78	current->set_child_tid = (__force void __user *)kthread;
  79}
  80
  81static inline struct kthread *to_kthread(struct task_struct *k)
  82{
  83	WARN_ON(!(k->flags & PF_KTHREAD));
  84	return (__force void *)k->set_child_tid;
  85}
  86
  87void free_kthread_struct(struct task_struct *k)
  88{
  89	struct kthread *kthread;
  90
  91	/*
  92	 * Can be NULL if this kthread was created by kernel_thread()
  93	 * or if kmalloc() in kthread() failed.
  94	 */
  95	kthread = to_kthread(k);
  96#ifdef CONFIG_BLK_CGROUP
  97	WARN_ON_ONCE(kthread && kthread->blkcg_css);
  98#endif
  99	kfree(kthread);
 100}
 101
 102/**
 103 * kthread_should_stop - should this kthread return now?
 104 *
 105 * When someone calls kthread_stop() on your kthread, it will be woken
 106 * and this will return true.  You should then return, and your return
 107 * value will be passed through to kthread_stop().
 108 */
 109bool kthread_should_stop(void)
 110{
 111	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 112}
 113EXPORT_SYMBOL(kthread_should_stop);
 114
 115bool __kthread_should_park(struct task_struct *k)
 116{
 117	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
 118}
 119EXPORT_SYMBOL_GPL(__kthread_should_park);
 120
 121/**
 122 * kthread_should_park - should this kthread park now?
 123 *
 124 * When someone calls kthread_park() on your kthread, it will be woken
 125 * and this will return true.  You should then do the necessary
 126 * cleanup and call kthread_parkme()
 127 *
 128 * Similar to kthread_should_stop(), but this keeps the thread alive
 129 * and in a park position. kthread_unpark() "restarts" the thread and
 130 * calls the thread function again.
 131 */
 132bool kthread_should_park(void)
 133{
 134	return __kthread_should_park(current);
 135}
 136EXPORT_SYMBOL_GPL(kthread_should_park);
 137
 138/**
 139 * kthread_freezable_should_stop - should this freezable kthread return now?
 140 * @was_frozen: optional out parameter, indicates whether %current was frozen
 141 *
 142 * kthread_should_stop() for freezable kthreads, which will enter
 143 * refrigerator if necessary.  This function is safe from kthread_stop() /
 144 * freezer deadlock and freezable kthreads should use this function instead
 145 * of calling try_to_freeze() directly.
 146 */
 147bool kthread_freezable_should_stop(bool *was_frozen)
 148{
 149	bool frozen = false;
 150
 151	might_sleep();
 152
 153	if (unlikely(freezing(current)))
 154		frozen = __refrigerator(true);
 155
 156	if (was_frozen)
 157		*was_frozen = frozen;
 158
 159	return kthread_should_stop();
 160}
 161EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
 162
 163/**
 164 * kthread_func - return the function specified on kthread creation
 165 * @task: kthread task in question
 166 *
 167 * Returns NULL if the task is not a kthread.
 168 */
 169void *kthread_func(struct task_struct *task)
 170{
 171	if (task->flags & PF_KTHREAD)
 172		return to_kthread(task)->threadfn;
 
 173	return NULL;
 174}
 175EXPORT_SYMBOL_GPL(kthread_func);
 176
 177/**
 178 * kthread_data - return data value specified on kthread creation
 179 * @task: kthread task in question
 180 *
 181 * Return the data value specified when kthread @task was created.
 182 * The caller is responsible for ensuring the validity of @task when
 183 * calling this function.
 184 */
 185void *kthread_data(struct task_struct *task)
 186{
 187	return to_kthread(task)->data;
 188}
 189EXPORT_SYMBOL_GPL(kthread_data);
 190
 191/**
 192 * kthread_probe_data - speculative version of kthread_data()
 193 * @task: possible kthread task in question
 194 *
 195 * @task could be a kthread task.  Return the data value specified when it
 196 * was created if accessible.  If @task isn't a kthread task or its data is
 197 * inaccessible for any reason, %NULL is returned.  This function requires
 198 * that @task itself is safe to dereference.
 199 */
 200void *kthread_probe_data(struct task_struct *task)
 201{
 202	struct kthread *kthread = to_kthread(task);
 203	void *data = NULL;
 204
 205	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 
 206	return data;
 207}
 208
 209static void __kthread_parkme(struct kthread *self)
 210{
 211	for (;;) {
 212		/*
 213		 * TASK_PARKED is a special state; we must serialize against
 214		 * possible pending wakeups to avoid store-store collisions on
 215		 * task->state.
 216		 *
 217		 * Such a collision might possibly result in the task state
 218		 * changin from TASK_PARKED and us failing the
 219		 * wait_task_inactive() in kthread_park().
 220		 */
 221		set_special_state(TASK_PARKED);
 222		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 223			break;
 224
 225		/*
 226		 * Thread is going to call schedule(), do not preempt it,
 227		 * or the caller of kthread_park() may spend more time in
 228		 * wait_task_inactive().
 229		 */
 230		preempt_disable();
 231		complete(&self->parked);
 232		schedule_preempt_disabled();
 233		preempt_enable();
 234	}
 235	__set_current_state(TASK_RUNNING);
 236}
 237
 238void kthread_parkme(void)
 239{
 240	__kthread_parkme(to_kthread(current));
 241}
 242EXPORT_SYMBOL_GPL(kthread_parkme);
 243
 244static int kthread(void *_create)
 245{
 246	/* Copy data: it's on kthread's stack */
 247	struct kthread_create_info *create = _create;
 248	int (*threadfn)(void *data) = create->threadfn;
 249	void *data = create->data;
 250	struct completion *done;
 251	struct kthread *self;
 252	int ret;
 253
 254	self = kzalloc(sizeof(*self), GFP_KERNEL);
 255	set_kthread_struct(self);
 256
 257	/* If user was SIGKILLed, I release the structure. */
 258	done = xchg(&create->done, NULL);
 259	if (!done) {
 260		kfree(create);
 261		do_exit(-EINTR);
 262	}
 263
 264	if (!self) {
 265		create->result = ERR_PTR(-ENOMEM);
 266		complete(done);
 267		do_exit(-ENOMEM);
 268	}
 269
 270	self->threadfn = threadfn;
 271	self->data = data;
 272	init_completion(&self->exited);
 273	init_completion(&self->parked);
 274	current->vfork_done = &self->exited;
 275
 276	/* OK, tell user we're spawned, wait for stop or wakeup */
 277	__set_current_state(TASK_UNINTERRUPTIBLE);
 278	create->result = current;
 279	/*
 280	 * Thread is going to call schedule(), do not preempt it,
 281	 * or the creator may spend more time in wait_task_inactive().
 282	 */
 283	preempt_disable();
 284	complete(done);
 285	schedule_preempt_disabled();
 286	preempt_enable();
 287
 288	ret = -EINTR;
 289	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 290		cgroup_kthread_ready();
 291		__kthread_parkme(self);
 292		ret = threadfn(data);
 293	}
 294	do_exit(ret);
 295}
 296
 297/* called from do_fork() to get node information for about to be created task */
 298int tsk_fork_get_node(struct task_struct *tsk)
 299{
 300#ifdef CONFIG_NUMA
 301	if (tsk == kthreadd_task)
 302		return tsk->pref_node_fork;
 303#endif
 304	return NUMA_NO_NODE;
 305}
 306
 307static void create_kthread(struct kthread_create_info *create)
 308{
 309	int pid;
 310
 311#ifdef CONFIG_NUMA
 312	current->pref_node_fork = create->node;
 313#endif
 314	/* We want our own signal handler (we take no signals by default). */
 315	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
 316	if (pid < 0) {
 317		/* If user was SIGKILLed, I release the structure. */
 318		struct completion *done = xchg(&create->done, NULL);
 319
 320		if (!done) {
 321			kfree(create);
 322			return;
 323		}
 324		create->result = ERR_PTR(pid);
 325		complete(done);
 326	}
 327}
 328
 329static __printf(4, 0)
 330struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 331						    void *data, int node,
 332						    const char namefmt[],
 333						    va_list args)
 334{
 335	DECLARE_COMPLETION_ONSTACK(done);
 336	struct task_struct *task;
 337	struct kthread_create_info *create = kmalloc(sizeof(*create),
 338						     GFP_KERNEL);
 339
 340	if (!create)
 341		return ERR_PTR(-ENOMEM);
 342	create->threadfn = threadfn;
 343	create->data = data;
 344	create->node = node;
 345	create->done = &done;
 346
 347	spin_lock(&kthread_create_lock);
 348	list_add_tail(&create->list, &kthread_create_list);
 349	spin_unlock(&kthread_create_lock);
 350
 351	wake_up_process(kthreadd_task);
 352	/*
 353	 * Wait for the completion in killable state, because we might be
 354	 * chosen by the OOM killer while kthreadd is trying to allocate
 355	 * memory for the new kernel thread.
 356	 */
 357	if (unlikely(wait_for_completion_killable(&done))) {
 358		/*
 359		 * If I was SIGKILLed before kthreadd (or new kernel thread)
 360		 * calls complete(), leave the cleanup of this structure to
 361		 * that thread.
 362		 */
 363		if (xchg(&create->done, NULL))
 364			return ERR_PTR(-EINTR);
 365		/*
 366		 * kthreadd (or new kernel thread) will call complete()
 367		 * shortly.
 368		 */
 369		wait_for_completion(&done);
 370	}
 371	task = create->result;
 372	if (!IS_ERR(task)) {
 373		static const struct sched_param param = { .sched_priority = 0 };
 374		char name[TASK_COMM_LEN];
 375
 376		/*
 377		 * task is already visible to other tasks, so updating
 378		 * COMM must be protected.
 379		 */
 380		vsnprintf(name, sizeof(name), namefmt, args);
 381		set_task_comm(task, name);
 382		/*
 383		 * root may have changed our (kthreadd's) priority or CPU mask.
 384		 * The kernel thread should not inherit these properties.
 385		 */
 386		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
 387		set_cpus_allowed_ptr(task,
 388				     housekeeping_cpumask(HK_FLAG_KTHREAD));
 389	}
 390	kfree(create);
 391	return task;
 392}
 393
 394/**
 395 * kthread_create_on_node - create a kthread.
 396 * @threadfn: the function to run until signal_pending(current).
 397 * @data: data ptr for @threadfn.
 398 * @node: task and thread structures for the thread are allocated on this node
 399 * @namefmt: printf-style name for the thread.
 400 *
 401 * Description: This helper function creates and names a kernel
 402 * thread.  The thread will be stopped: use wake_up_process() to start
 403 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 404 * is affine to all CPUs.
 405 *
 406 * If the thread is going to be bound to a particular cpu, give that cpu's
 407 * node in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
 408 * When woken, the thread will run @threadfn() with @data as its
 409 * argument. @threadfn() can either call do_exit() directly if it is a
 410 * standalone thread for which no one will call kthread_stop(), or
 411 * return when 'kthread_should_stop()' is true (which means
 412 * kthread_stop() has been called).  The return value should be zero
 413 * or a negative error number; it will be passed to kthread_stop().
 414 *
 415 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 416 */
 417struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 418					   void *data, int node,
 419					   const char namefmt[],
 420					   ...)
 421{
 422	struct task_struct *task;
 423	va_list args;
 424
 425	va_start(args, namefmt);
 426	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 427	va_end(args);
 428
 429	return task;
 430}
 431EXPORT_SYMBOL(kthread_create_on_node);
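/*
 * Editorial usage sketch (not part of the original file): the typical life
 * cycle of a thread made with kthread_create_on_node().  All names below
 * (demo_threadfn, demo_start, demo_stop, demo_task) are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static struct task_struct *demo_task;

static int demo_threadfn(void *data)
{
	/* Run until someone calls kthread_stop() on this task. */
	while (!kthread_should_stop()) {
		/* ... do one unit of work on @data ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* becomes the return value of kthread_stop() */
}

static int demo_start(void *data)
{
	demo_task = kthread_create_on_node(demo_threadfn, data, NUMA_NO_NODE,
					   "demo/%d", 0);
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	wake_up_process(demo_task);	/* the new thread starts out stopped */
	return 0;
}

static void demo_stop(void)
{
	pr_info("demo thread exited with %d\n", kthread_stop(demo_task));
}
#endif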
 432
 433static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 434{
 435	unsigned long flags;
 436
 437	if (!wait_task_inactive(p, state)) {
 438		WARN_ON(1);
 439		return;
 440	}
 441
 442	/* It's safe because the task is inactive. */
 443	raw_spin_lock_irqsave(&p->pi_lock, flags);
 444	do_set_cpus_allowed(p, mask);
 445	p->flags |= PF_NO_SETAFFINITY;
 446	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 447}
 448
 449static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 450{
 451	__kthread_bind_mask(p, cpumask_of(cpu), state);
 452}
 453
 454void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 455{
 456	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 457}
 458
 459/**
 460 * kthread_bind - bind a just-created kthread to a cpu.
 461 * @p: thread created by kthread_create().
 462 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 463 *
 464 * Description: This function is equivalent to set_cpus_allowed(),
 465 * except that @cpu doesn't need to be online, and the thread must be
 466 * stopped (i.e., just returned from kthread_create()).
 467 */
 468void kthread_bind(struct task_struct *p, unsigned int cpu)
 469{
 470	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 471}
 472EXPORT_SYMBOL(kthread_bind);
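/*
 * Editorial sketch: kthread_bind() may only be used while the new thread is
 * still stopped, i.e. right after kthread_create() and before the first
 * wake_up_process().  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static struct task_struct *demo_create_bound(int (*fn)(void *), void *data,
					      unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create(fn, data, "demo_bound/%u", cpu);
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);	/* @cpu must be possible, may be offline */
		wake_up_process(p);
	}
	return p;
}
#endif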
 473
 474/**
 475 * kthread_create_on_cpu - Create a cpu bound kthread
 476 * @threadfn: the function to run until signal_pending(current).
 477 * @data: data ptr for @threadfn.
 478 * @cpu: The cpu to which the thread should be bound.
 479 * @namefmt: printf-style name for the thread. Format is restricted
 480 *	     to "name.*%u". Code fills in cpu number.
 481 *
 482 * Description: This helper function creates and names a kernel thread.
 483 */
 484struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 485					  void *data, unsigned int cpu,
 486					  const char *namefmt)
 487{
 488	struct task_struct *p;
 489
 490	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 491				   cpu);
 492	if (IS_ERR(p))
 493		return p;
 494	kthread_bind(p, cpu);
 495	/* CPU hotplug needs to bind the thread once again when unparking it. */
 496	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
 497	to_kthread(p)->cpu = cpu;
 498	return p;
 499}
 500
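/*
 * Editorial sketch: a per-cpu thread created with kthread_create_on_cpu().
 * The "%u" in the name format is filled in with the cpu number.  The thread
 * function should park itself when asked to, so that CPU hotplug can rebind
 * it on unpark.  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static int demo_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* e.g. while the CPU is offline */
		/* ... per-cpu work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static struct task_struct *demo_percpu_start(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create_on_cpu(demo_percpu_fn, NULL, cpu, "demo_pcpu/%u");
	if (!IS_ERR(p))
		wake_up_process(p);
	return p;
}
#endif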
 501/**
 502 * kthread_unpark - unpark a thread created by kthread_create().
 503 * @k:		thread created by kthread_create().
 504 *
 505 * Sets kthread_should_park() for @k to return false and wakes it.
 506 * If the thread is marked percpu then it is bound to the cpu again
 507 * before being woken.
 508 */
 509void kthread_unpark(struct task_struct *k)
 510{
 511	struct kthread *kthread = to_kthread(k);
 512
 513	/*
 514	 * Newly created kthread was parked when the CPU was offline.
 515	 * The binding was lost and we need to set it again.
 516	 */
 517	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 518		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 519
 520	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 521	/*
 522	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 523	 */
 524	wake_up_state(k, TASK_PARKED);
 525}
 526EXPORT_SYMBOL_GPL(kthread_unpark);
 527
 528/**
 529 * kthread_park - park a thread created by kthread_create().
 530 * @k: thread created by kthread_create().
 531 *
 532 * Sets kthread_should_park() for @k to return true, wakes it, and
 533 * waits for it to park. This can also be called after kthread_create()
 534 * instead of calling wake_up_process(): the thread will park without
 535 * calling threadfn().
 536 *
 537 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 538 * If called by the kthread itself, just the park bit is set.
 539 */
 540int kthread_park(struct task_struct *k)
 541{
 542	struct kthread *kthread = to_kthread(k);
 543
 544	if (WARN_ON(k->flags & PF_EXITING))
 545		return -ENOSYS;
 546
 547	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
 548		return -EBUSY;
 549
 550	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 551	if (k != current) {
 552		wake_up_process(k);
 553		/*
 554		 * Wait for __kthread_parkme() to complete(), this means we
 555		 * _will_ have TASK_PARKED and are about to call schedule().
 556		 */
 557		wait_for_completion(&kthread->parked);
 558		/*
 559		 * Now wait for that schedule() to complete and the task to
 560		 * get scheduled out.
 561		 */
 562		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
 563	}
 564
 565	return 0;
 566}
 567EXPORT_SYMBOL_GPL(kthread_park);
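/*
 * Editorial sketch: parking a thread across a critical window and unparking
 * it afterwards.  The thread itself must call kthread_parkme() when
 * kthread_should_park() becomes true (see the per-cpu sketch above).
 * Names below are hypothetical.
 */
#if 0	/* illustrative only */
static int demo_quiesce(struct task_struct *p)
{
	int ret;

	ret = kthread_park(p);	/* returns once @p sits in TASK_PARKED */
	if (ret)
		return ret;	/* -ENOSYS: the thread is already exiting */

	/* ... @p is guaranteed not to run in this window ... */

	kthread_unpark(p);	/* rebinds per-cpu threads and wakes @p */
	return 0;
}
#endif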
 568
 569/**
 570 * kthread_stop - stop a thread created by kthread_create().
 571 * @k: thread created by kthread_create().
 572 *
 573 * Sets kthread_should_stop() for @k to return true, wakes it, and
 574 * waits for it to exit. This can also be called after kthread_create()
 575 * instead of calling wake_up_process(): the thread will exit without
 576 * calling threadfn().
 577 *
 578 * If threadfn() may call do_exit() itself, the caller must ensure
 579 * task_struct can't go away.
 580 *
 581 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 582 * was never called.
 583 */
 584int kthread_stop(struct task_struct *k)
 585{
 586	struct kthread *kthread;
 587	int ret;
 588
 589	trace_sched_kthread_stop(k);
 590
 591	get_task_struct(k);
 592	kthread = to_kthread(k);
 593	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 594	kthread_unpark(k);
 595	wake_up_process(k);
 596	wait_for_completion(&kthread->exited);
 597	ret = k->exit_code;
 598	put_task_struct(k);
 599
 600	trace_sched_kthread_stop_ret(ret);
 601	return ret;
 602}
 603EXPORT_SYMBOL(kthread_stop);
 604
 605int kthreadd(void *unused)
 606{
 607	struct task_struct *tsk = current;
 608
 609	/* Setup a clean context for our children to inherit. */
 610	set_task_comm(tsk, "kthreadd");
 611	ignore_signals(tsk);
 612	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
 613	set_mems_allowed(node_states[N_MEMORY]);
 614
 615	current->flags |= PF_NOFREEZE;
 616	cgroup_init_kthreadd();
 617
 618	for (;;) {
 619		set_current_state(TASK_INTERRUPTIBLE);
 620		if (list_empty(&kthread_create_list))
 621			schedule();
 622		__set_current_state(TASK_RUNNING);
 623
 624		spin_lock(&kthread_create_lock);
 625		while (!list_empty(&kthread_create_list)) {
 626			struct kthread_create_info *create;
 627
 628			create = list_entry(kthread_create_list.next,
 629					    struct kthread_create_info, list);
 630			list_del_init(&create->list);
 631			spin_unlock(&kthread_create_lock);
 632
 633			create_kthread(create);
 634
 635			spin_lock(&kthread_create_lock);
 636		}
 637		spin_unlock(&kthread_create_lock);
 638	}
 639
 640	return 0;
 641}
 642
 643void __kthread_init_worker(struct kthread_worker *worker,
 644				const char *name,
 645				struct lock_class_key *key)
 646{
 647	memset(worker, 0, sizeof(struct kthread_worker));
 648	raw_spin_lock_init(&worker->lock);
 649	lockdep_set_class_and_name(&worker->lock, key, name);
 650	INIT_LIST_HEAD(&worker->work_list);
 651	INIT_LIST_HEAD(&worker->delayed_work_list);
 652}
 653EXPORT_SYMBOL_GPL(__kthread_init_worker);
 654
 655/**
 656 * kthread_worker_fn - kthread function to process kthread_worker
 657 * @worker_ptr: pointer to initialized kthread_worker
 658 *
 659 * This function implements the main cycle of kthread worker. It processes
 660 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 661 * is empty.
 662 *
 663 * Works must not hold any locks or leave preemption or interrupts disabled
 664 * when they finish. A safe point for freezing is defined after one work
 665 * finishes and before the next one is started.
 666 *
 667 * Also, a work must not be handled by more than one worker at the same time;
 668 * see also kthread_queue_work().
 669 */
 670int kthread_worker_fn(void *worker_ptr)
 671{
 672	struct kthread_worker *worker = worker_ptr;
 673	struct kthread_work *work;
 674
 675	/*
 676	 * FIXME: Update the check and remove the assignment when all kthread
 677	 * worker users are created using kthread_create_worker*() functions.
 678	 */
 679	WARN_ON(worker->task && worker->task != current);
 680	worker->task = current;
 681
 682	if (worker->flags & KTW_FREEZABLE)
 683		set_freezable();
 684
 685repeat:
 686	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 687
 688	if (kthread_should_stop()) {
 689		__set_current_state(TASK_RUNNING);
 690		raw_spin_lock_irq(&worker->lock);
 691		worker->task = NULL;
 692		raw_spin_unlock_irq(&worker->lock);
 693		return 0;
 694	}
 695
 696	work = NULL;
 697	raw_spin_lock_irq(&worker->lock);
 698	if (!list_empty(&worker->work_list)) {
 699		work = list_first_entry(&worker->work_list,
 700					struct kthread_work, node);
 701		list_del_init(&work->node);
 702	}
 703	worker->current_work = work;
 704	raw_spin_unlock_irq(&worker->lock);
 705
 706	if (work) {
 707		__set_current_state(TASK_RUNNING);
 708		work->func(work);
 709	} else if (!freezing(current))
 710		schedule();
 711
 712	try_to_freeze();
 713	cond_resched();
 714	goto repeat;
 715}
 716EXPORT_SYMBOL_GPL(kthread_worker_fn);
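/*
 * Editorial sketch: the legacy pattern the FIXME above refers to - a worker
 * initialized with kthread_init_worker() and run by passing kthread_worker_fn
 * to kthread_run() directly, instead of using kthread_create_worker().
 * Names below are hypothetical.
 */
#if 0	/* illustrative only */
static struct kthread_worker demo_worker;

static int demo_legacy_worker_start(void)
{
	struct task_struct *task;

	kthread_init_worker(&demo_worker);
	task = kthread_run(kthread_worker_fn, &demo_worker, "demo_legacy");
	return PTR_ERR_OR_ZERO(task);
}
#endif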
 717
 718static __printf(3, 0) struct kthread_worker *
 719__kthread_create_worker(int cpu, unsigned int flags,
 720			const char namefmt[], va_list args)
 721{
 722	struct kthread_worker *worker;
 723	struct task_struct *task;
 724	int node = NUMA_NO_NODE;
 725
 726	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 727	if (!worker)
 728		return ERR_PTR(-ENOMEM);
 729
 730	kthread_init_worker(worker);
 731
 732	if (cpu >= 0)
 733		node = cpu_to_node(cpu);
 734
 735	task = __kthread_create_on_node(kthread_worker_fn, worker,
 736						node, namefmt, args);
 737	if (IS_ERR(task))
 738		goto fail_task;
 739
 740	if (cpu >= 0)
 741		kthread_bind(task, cpu);
 742
 743	worker->flags = flags;
 744	worker->task = task;
 745	wake_up_process(task);
 746	return worker;
 747
 748fail_task:
 749	kfree(worker);
 750	return ERR_CAST(task);
 751}
 752
 753/**
 754 * kthread_create_worker - create a kthread worker
 755 * @flags: flags modifying the default behavior of the worker
 756 * @namefmt: printf-style name for the kthread worker (task).
 757 *
 758 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 759 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 760 * when the worker was SIGKILLed.
 761 */
 762struct kthread_worker *
 763kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 764{
 765	struct kthread_worker *worker;
 766	va_list args;
 767
 768	va_start(args, namefmt);
 769	worker = __kthread_create_worker(-1, flags, namefmt, args);
 770	va_end(args);
 771
 772	return worker;
 773}
 774EXPORT_SYMBOL(kthread_create_worker);
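/*
 * Editorial sketch: a dedicated worker plus a work item embedded in a larger
 * object, recovered in the callback via container_of().  Names below are
 * hypothetical; error handling is trimmed to the essentials.
 */
#if 0	/* illustrative only */
struct demo_ctx {
	struct kthread_worker *worker;
	struct kthread_work work;
	int payload;
};

static void demo_work_fn(struct kthread_work *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	pr_info("demo payload %d\n", ctx->payload);
}

static int demo_worker_setup(struct demo_ctx *ctx)
{
	ctx->worker = kthread_create_worker(0, "demo_worker");
	if (IS_ERR(ctx->worker))
		return PTR_ERR(ctx->worker);

	kthread_init_work(&ctx->work, demo_work_fn);
	kthread_queue_work(ctx->worker, &ctx->work);
	return 0;
}
#endif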
 775
 776/**
 777 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 778 *	to a given CPU and the associated NUMA node.
 779 * @cpu: CPU number
 780 * @flags: flags modifying the default behavior of the worker
 781 * @namefmt: printf-style name for the kthread worker (task).
 782 *
 783 * Use a valid CPU number if you want to bind the kthread worker
 784 * to the given CPU and the associated NUMA node.
 785 *
 786 * It is good practice to also include the cpu number in the worker name.
 787 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 788 *
 789 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 790 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 791 * when the worker was SIGKILLed.
 792 */
 793struct kthread_worker *
 794kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 795			     const char namefmt[], ...)
 796{
 797	struct kthread_worker *worker;
 798	va_list args;
 799
 800	va_start(args, namefmt);
 801	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 802	va_end(args);
 803
 804	return worker;
 805}
 806EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 807
 808/*
 809 * Returns true when the work cannot be queued at the moment,
 810 * i.e. when it is already pending in a worker list
 811 * or when it is being cancelled.
 812 */
 813static inline bool queuing_blocked(struct kthread_worker *worker,
 814				   struct kthread_work *work)
 815{
 816	lockdep_assert_held(&worker->lock);
 817
 818	return !list_empty(&work->node) || work->canceling;
 819}
 820
 821static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 822					     struct kthread_work *work)
 823{
 824	lockdep_assert_held(&worker->lock);
 825	WARN_ON_ONCE(!list_empty(&work->node));
 826	/* Do not use a work with >1 worker, see kthread_queue_work() */
 827	WARN_ON_ONCE(work->worker && work->worker != worker);
 828}
 829
 830/* insert @work before @pos in @worker */
 831static void kthread_insert_work(struct kthread_worker *worker,
 832				struct kthread_work *work,
 833				struct list_head *pos)
 834{
 835	kthread_insert_work_sanity_check(worker, work);
 836
 837	list_add_tail(&work->node, pos);
 838	work->worker = worker;
 839	if (!worker->current_work && likely(worker->task))
 840		wake_up_process(worker->task);
 841}
 842
 843/**
 844 * kthread_queue_work - queue a kthread_work
 845 * @worker: target kthread_worker
 846 * @work: kthread_work to queue
 847 *
 848 * Queue @work on @worker for async execution.  @worker must have been
 849 * created with kthread_create_worker() or kthread_init_worker().  Returns %true
 850 * if @work was successfully queued, %false if it was already pending.
 851 *
 852 * Reinitialize the work if it needs to be used by another worker.
 853 * For example, when the worker was stopped and started again.
 854 */
 855bool kthread_queue_work(struct kthread_worker *worker,
 856			struct kthread_work *work)
 857{
 858	bool ret = false;
 859	unsigned long flags;
 860
 861	raw_spin_lock_irqsave(&worker->lock, flags);
 862	if (!queuing_blocked(worker, work)) {
 863		kthread_insert_work(worker, work, &worker->work_list);
 864		ret = true;
 865	}
 866	raw_spin_unlock_irqrestore(&worker->lock, flags);
 867	return ret;
 868}
 869EXPORT_SYMBOL_GPL(kthread_queue_work);
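/*
 * Editorial sketch: kthread_queue_work() returns %false when @work is still
 * pending, so a caller that may race with itself can simply test the return
 * value.  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static void demo_kick(struct kthread_worker *worker, struct kthread_work *work)
{
	if (!kthread_queue_work(worker, work))
		pr_debug("demo: work already pending, nothing to do\n");
}
#endif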
 870
 871/**
 872 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 873 *	delayed work when the timer expires.
 874 * @t: pointer to the expired timer
 875 *
 876 * The signature of the function is defined by struct timer_list.
 877 * It is called from an irq-safe timer with irqs already disabled.
 878 */
 879void kthread_delayed_work_timer_fn(struct timer_list *t)
 880{
 881	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 882	struct kthread_work *work = &dwork->work;
 883	struct kthread_worker *worker = work->worker;
 884	unsigned long flags;
 885
 886	/*
 887	 * This might happen when a pending work is reinitialized.
 888	 * It means that the work is being used in a wrong way.
 889	 */
 890	if (WARN_ON_ONCE(!worker))
 891		return;
 892
 893	raw_spin_lock_irqsave(&worker->lock, flags);
 894	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 895	WARN_ON_ONCE(work->worker != worker);
 896
 897	/* Move the work from worker->delayed_work_list. */
 898	WARN_ON_ONCE(list_empty(&work->node));
 899	list_del_init(&work->node);
 900	kthread_insert_work(worker, work, &worker->work_list);
 901
 902	raw_spin_unlock_irqrestore(&worker->lock, flags);
 903}
 904EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 905
 906static void __kthread_queue_delayed_work(struct kthread_worker *worker,
 907					 struct kthread_delayed_work *dwork,
 908					 unsigned long delay)
 909{
 910	struct timer_list *timer = &dwork->timer;
 911	struct kthread_work *work = &dwork->work;
 912
 913	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 914
 915	/*
 916	 * If @delay is 0, queue @dwork->work immediately.  This is for
 917	 * both optimization and correctness.  The earliest @timer can
 918	 * expire is on the closest next tick and delayed_work users depend
 919	 * on that there's no such delay when @delay is 0.
 920	 */
 921	if (!delay) {
 922		kthread_insert_work(worker, work, &worker->work_list);
 923		return;
 924	}
 925
 926	/* Be paranoid and try to detect possible races already now. */
 927	kthread_insert_work_sanity_check(worker, work);
 928
 929	list_add(&work->node, &worker->delayed_work_list);
 930	work->worker = worker;
 931	timer->expires = jiffies + delay;
 932	add_timer(timer);
 933}
 934
 935/**
 936 * kthread_queue_delayed_work - queue the associated kthread work
 937 *	after a delay.
 938 * @worker: target kthread_worker
 939 * @dwork: kthread_delayed_work to queue
 940 * @delay: number of jiffies to wait before queuing
 941 *
 942 * If the work has not been pending it starts a timer that will queue
 943 * the work after the given @delay. If @delay is zero, it queues the
 944 * work immediately.
 945 *
 946 * Return: %false if @work was already pending, meaning that either the
 947 * timer was running or the work was queued. Returns %true
 948 * otherwise.
 949 */
 950bool kthread_queue_delayed_work(struct kthread_worker *worker,
 951				struct kthread_delayed_work *dwork,
 952				unsigned long delay)
 953{
 954	struct kthread_work *work = &dwork->work;
 955	unsigned long flags;
 956	bool ret = false;
 957
 958	raw_spin_lock_irqsave(&worker->lock, flags);
 959
 960	if (!queuing_blocked(worker, work)) {
 961		__kthread_queue_delayed_work(worker, dwork, delay);
 962		ret = true;
 963	}
 964
 965	raw_spin_unlock_irqrestore(&worker->lock, flags);
 966	return ret;
 967}
 968EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
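/*
 * Editorial sketch: a delayed work queued on an existing worker.  Names below
 * are hypothetical; @worker is assumed to come from kthread_create_worker().
 */
#if 0	/* illustrative only */
static struct kthread_delayed_work demo_dwork;

static void demo_dwork_fn(struct kthread_work *work)
{
	/* Runs on the worker thread roughly 100ms after queuing. */
}

static void demo_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&demo_dwork, demo_dwork_fn);
	kthread_queue_delayed_work(worker, &demo_dwork,
				   msecs_to_jiffies(100));
}
#endif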
 969
 970struct kthread_flush_work {
 971	struct kthread_work	work;
 972	struct completion	done;
 973};
 974
 975static void kthread_flush_work_fn(struct kthread_work *work)
 976{
 977	struct kthread_flush_work *fwork =
 978		container_of(work, struct kthread_flush_work, work);
 979	complete(&fwork->done);
 980}
 981
 982/**
 983 * kthread_flush_work - flush a kthread_work
 984 * @work: work to flush
 985 *
 986 * If @work is queued or executing, wait for it to finish execution.
 987 */
 988void kthread_flush_work(struct kthread_work *work)
 989{
 990	struct kthread_flush_work fwork = {
 991		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 992		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 993	};
 994	struct kthread_worker *worker;
 995	bool noop = false;
 996
 997	worker = work->worker;
 998	if (!worker)
 999		return;
1000
1001	raw_spin_lock_irq(&worker->lock);
1002	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1003	WARN_ON_ONCE(work->worker != worker);
1004
1005	if (!list_empty(&work->node))
1006		kthread_insert_work(worker, &fwork.work, work->node.next);
1007	else if (worker->current_work == work)
1008		kthread_insert_work(worker, &fwork.work,
1009				    worker->work_list.next);
1010	else
1011		noop = true;
1012
1013	raw_spin_unlock_irq(&worker->lock);
1014
1015	if (!noop)
1016		wait_for_completion(&fwork.done);
1017}
1018EXPORT_SYMBOL_GPL(kthread_flush_work);
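/*
 * Editorial sketch: waiting for a specific work item to become idle before
 * freeing the object it operates on.  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static void demo_release(struct kthread_work *work, void *obj)
{
	kthread_flush_work(work);	/* wait until @work is neither queued nor running */
	kfree(obj);
}
#endif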
1019
1020/*
1021 * This function removes the work from the worker queue. Also it makes sure
1022 * that it won't get queued later via the delayed work's timer.
1023 *
1024 * The work might still be in use when this function finishes. See the
1025 * current_work processed by the worker.
1026 *
1027 * Return: %true if @work was pending and successfully canceled,
1028 *	%false if @work was not pending
1029 */
1030static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
1031				  unsigned long *flags)
1032{
1033	/* Try to cancel the timer if exists. */
1034	if (is_dwork) {
1035		struct kthread_delayed_work *dwork =
1036			container_of(work, struct kthread_delayed_work, work);
1037		struct kthread_worker *worker = work->worker;
1038
1039		/*
1040		 * del_timer_sync() must be called to make sure that the timer
1041		 * callback is not running. The lock must be temporarily released
1042		 * to avoid a deadlock with the callback. In the meantime,
1043		 * any queuing is blocked by setting the canceling counter.
1044		 */
1045		work->canceling++;
1046		raw_spin_unlock_irqrestore(&worker->lock, *flags);
1047		del_timer_sync(&dwork->timer);
1048		raw_spin_lock_irqsave(&worker->lock, *flags);
1049		work->canceling--;
1050	}
1051
1052	/*
1053	 * Try to remove the work from a worker list. It might either
1054	 * be from worker->work_list or from worker->delayed_work_list.
1055	 */
1056	if (!list_empty(&work->node)) {
1057		list_del_init(&work->node);
1058		return true;
1059	}
1060
1061	return false;
1062}
1063
1064/**
1065 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1066 * @worker: kthread worker to use
1067 * @dwork: kthread delayed work to queue
1068 * @delay: number of jiffies to wait before queuing
1069 *
1070 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1071 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1072 * @work is guaranteed to be queued immediately.
1073 *
1074 * Return: %true if @dwork was pending and its timer was modified,
1075 * %false otherwise.
1076 *
1077 * A special case is when the work is being canceled in parallel.
1078 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1079 * or yet another kthread_mod_delayed_work() call. We let the other command
1080 * win and return %false here. The caller is supposed to synchronize these
1081 * operations in a reasonable way.
1082 *
1083 * This function is safe to call from any context including IRQ handler.
1084 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1085 * for details.
1086 */
1087bool kthread_mod_delayed_work(struct kthread_worker *worker,
1088			      struct kthread_delayed_work *dwork,
1089			      unsigned long delay)
1090{
1091	struct kthread_work *work = &dwork->work;
1092	unsigned long flags;
1093	int ret = false;
1094
1095	raw_spin_lock_irqsave(&worker->lock, flags);
1096
1097	/* Do not bother with canceling when never queued. */
1098	if (!work->worker)
1099		goto fast_queue;
1100
1101	/* Work must not be used with >1 worker, see kthread_queue_work() */
1102	WARN_ON_ONCE(work->worker != worker);
1103
1104	/* Do not fight with another command that is canceling this work. */
1105	if (work->canceling)
1106		goto out;
1107
1108	ret = __kthread_cancel_work(work, true, &flags);
1109fast_queue:
1110	__kthread_queue_delayed_work(worker, dwork, delay);
1111out:
1112	raw_spin_unlock_irqrestore(&worker->lock, flags);
1113	return ret;
1114}
1115EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
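/*
 * Editorial sketch: kthread_mod_delayed_work() as a "push the deadline back"
 * primitive, e.g. for an inactivity timeout.  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static void demo_touch(struct kthread_worker *worker,
		       struct kthread_delayed_work *timeout_work)
{
	/*
	 * Whether or not the work was already pending, it will now fire
	 * 5 seconds from this call (unless a cancel is racing with us,
	 * in which case the cancel wins and %false is returned).
	 */
	kthread_mod_delayed_work(worker, timeout_work, 5 * HZ);
}
#endif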
1116
1117static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1118{
1119	struct kthread_worker *worker = work->worker;
1120	unsigned long flags;
1121	int ret = false;
1122
1123	if (!worker)
1124		goto out;
1125
1126	raw_spin_lock_irqsave(&worker->lock, flags);
1127	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1128	WARN_ON_ONCE(work->worker != worker);
1129
1130	ret = __kthread_cancel_work(work, is_dwork, &flags);
1131
1132	if (worker->current_work != work)
1133		goto out_fast;
1134
1135	/*
1136	 * The work is in progress and we need to wait with the lock released.
1137	 * In the meantime, block any queuing by setting the canceling counter.
1138	 */
1139	work->canceling++;
1140	raw_spin_unlock_irqrestore(&worker->lock, flags);
1141	kthread_flush_work(work);
1142	raw_spin_lock_irqsave(&worker->lock, flags);
1143	work->canceling--;
1144
1145out_fast:
1146	raw_spin_unlock_irqrestore(&worker->lock, flags);
1147out:
1148	return ret;
1149}
1150
1151/**
1152 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1153 * @work: the kthread work to cancel
1154 *
1155 * Cancel @work and wait for its execution to finish.  This function
1156 * can be used even if the work re-queues itself. On return from this
1157 * function, @work is guaranteed to be not pending or executing on any CPU.
1158 *
1159 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1160 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1161 *
1162 * The caller must ensure that the worker on which @work was last
1163 * queued can't be destroyed before this function returns.
1164 *
1165 * Return: %true if @work was pending, %false otherwise.
1166 */
1167bool kthread_cancel_work_sync(struct kthread_work *work)
1168{
1169	return __kthread_cancel_work_sync(work, false);
1170}
1171EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1172
1173/**
1174 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1175 *	wait for it to finish.
1176 * @dwork: the kthread delayed work to cancel
1177 *
1178 * This is kthread_cancel_work_sync() for delayed works.
1179 *
1180 * Return: %true if @dwork was pending, %false otherwise.
1181 */
1182bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1183{
1184	return __kthread_cancel_work_sync(&dwork->work, true);
1185}
1186EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1187
1188/**
1189 * kthread_flush_worker - flush all current works on a kthread_worker
1190 * @worker: worker to flush
1191 *
1192 * Wait until all currently executing or pending works on @worker are
1193 * finished.
1194 */
1195void kthread_flush_worker(struct kthread_worker *worker)
1196{
1197	struct kthread_flush_work fwork = {
1198		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1199		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1200	};
1201
1202	kthread_queue_work(worker, &fwork.work);
1203	wait_for_completion(&fwork.done);
1204}
1205EXPORT_SYMBOL_GPL(kthread_flush_worker);
1206
1207/**
1208 * kthread_destroy_worker - destroy a kthread worker
1209 * @worker: worker to be destroyed
1210 *
1211 * Flush and destroy @worker.  The simple flush is enough because the kthread
1212 * worker API is used only in trivial scenarios.  There are no multi-step state
1213 * machines needed.
1214 */
1215void kthread_destroy_worker(struct kthread_worker *worker)
1216{
1217	struct task_struct *task;
1218
1219	task = worker->task;
1220	if (WARN_ON(!task))
1221		return;
1222
1223	kthread_flush_worker(worker);
1224	kthread_stop(task);
1225	WARN_ON(!list_empty(&worker->work_list));
1226	kfree(worker);
1227}
1228EXPORT_SYMBOL(kthread_destroy_worker);
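/*
 * Editorial sketch: a typical teardown order when both delayed works and a
 * dedicated worker are in play.  Names below are hypothetical.
 */
#if 0	/* illustrative only */
static void demo_teardown(struct kthread_worker *worker,
			  struct kthread_delayed_work *dwork)
{
	/* Make sure the timer cannot re-queue the work behind our back. */
	kthread_cancel_delayed_work_sync(dwork);
	/* Flushes remaining works, stops the thread and frees the worker. */
	kthread_destroy_worker(worker);
}
#endif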
1229
1230/**
1231 * kthread_use_mm - make the calling kthread operate on an address space
1232 * @mm: address space to operate on
1233 */
1234void kthread_use_mm(struct mm_struct *mm)
1235{
1236	struct mm_struct *active_mm;
1237	struct task_struct *tsk = current;
1238
1239	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1240	WARN_ON_ONCE(tsk->mm);
1241
1242	task_lock(tsk);
1243	/* Hold off tlb flush IPIs while switching mm's */
1244	local_irq_disable();
1245	active_mm = tsk->active_mm;
1246	if (active_mm != mm) {
1247		mmgrab(mm);
1248		tsk->active_mm = mm;
1249	}
1250	tsk->mm = mm;
1251	switch_mm_irqs_off(active_mm, mm, tsk);
1252	local_irq_enable();
1253	task_unlock(tsk);
1254#ifdef finish_arch_post_lock_switch
1255	finish_arch_post_lock_switch();
1256#endif
1257
1258	if (active_mm != mm)
1259		mmdrop(active_mm);
1260
1261	to_kthread(tsk)->oldfs = force_uaccess_begin();
1262}
1263EXPORT_SYMBOL_GPL(kthread_use_mm);
1264
1265/**
1266 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1267 * @mm: address space to operate on
1268 */
1269void kthread_unuse_mm(struct mm_struct *mm)
1270{
1271	struct task_struct *tsk = current;
1272
1273	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1274	WARN_ON_ONCE(!tsk->mm);
1275
1276	force_uaccess_end(to_kthread(tsk)->oldfs);
1277
1278	task_lock(tsk);
1279	sync_mm_rss(mm);
1280	local_irq_disable();
1281	tsk->mm = NULL;
1282	/* active_mm is still 'mm' */
1283	enter_lazy_tlb(mm, tsk);
1284	local_irq_enable();
1285	task_unlock(tsk);
1286}
1287EXPORT_SYMBOL_GPL(kthread_unuse_mm);
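/*
 * Editorial sketch: a kthread temporarily adopting a user address space, e.g.
 * to service requests on behalf of a user task.  Names below are
 * hypothetical; @mm is assumed to be a properly referenced mm_struct, e.g.
 * obtained with get_task_mm().
 */
#if 0	/* illustrative only */
static int demo_copy_from_user_mm(struct mm_struct *mm, void *dst,
				  const void __user *src, size_t len)
{
	int ret;

	kthread_use_mm(mm);	/* current->mm = mm, user access is now legal */
	ret = copy_from_user(dst, src, len) ? -EFAULT : 0;
	kthread_unuse_mm(mm);	/* drop back to lazily borrowing an mm */
	return ret;
}
#endif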
1288
1289#ifdef CONFIG_BLK_CGROUP
1290/**
1291 * kthread_associate_blkcg - associate blkcg to current kthread
1292 * @css: the cgroup info
1293 *
1294 * The current thread must be a kthread that runs jobs on behalf of other
1295 * threads. In some cases, we expect the jobs to attach the cgroup info of
1296 * the original threads instead of that of the current thread. This function
1297 * stores the original thread's cgroup info in the current kthread's context
1298 * for later retrieval.
1299 */
1300void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1301{
1302	struct kthread *kthread;
1303
1304	if (!(current->flags & PF_KTHREAD))
1305		return;
1306	kthread = to_kthread(current);
1307	if (!kthread)
1308		return;
1309
1310	if (kthread->blkcg_css) {
1311		css_put(kthread->blkcg_css);
1312		kthread->blkcg_css = NULL;
1313	}
1314	if (css) {
1315		css_get(css);
1316		kthread->blkcg_css = css;
1317	}
1318}
1319EXPORT_SYMBOL(kthread_associate_blkcg);
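/*
 * Editorial sketch: a loop-device-style kthread charging its I/O to the
 * cgroup of the originating task.  Names below are hypothetical; @css is
 * assumed to have been looked up and referenced by the caller.
 */
#if 0	/* illustrative only */
static void demo_do_io_as(struct cgroup_subsys_state *css)
{
	kthread_associate_blkcg(css);	/* takes its own reference on @css */
	/* ... submit bios; the block layer consults kthread_blkcg() ... */
	kthread_associate_blkcg(NULL);	/* drop the association again */
}
#endif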
1320
1321/**
1322 * kthread_blkcg - get associated blkcg css of current kthread
1323 *
1324 * Current thread must be a kthread.
1325 */
1326struct cgroup_subsys_state *kthread_blkcg(void)
1327{
1328	struct kthread *kthread;
1329
1330	if (current->flags & PF_KTHREAD) {
1331		kthread = to_kthread(current);
1332		if (kthread)
1333			return kthread->blkcg_css;
1334	}
1335	return NULL;
1336}
1337EXPORT_SYMBOL(kthread_blkcg);
1338#endif