v4.17
   1/* Kernel thread helper functions.
   2 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   3 *
   4 * Creation is done via kthreadd, so that we get a clean environment
   5 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   6 * etc.).
   7 */
   8#include <uapi/linux/sched/types.h>
   9#include <linux/sched.h>
  10#include <linux/sched/task.h>
  11#include <linux/kthread.h>
  12#include <linux/completion.h>
  13#include <linux/err.h>
  14#include <linux/cpuset.h>
  15#include <linux/unistd.h>
  16#include <linux/file.h>
  17#include <linux/export.h>
  18#include <linux/mutex.h>
  19#include <linux/slab.h>
  20#include <linux/freezer.h>
  21#include <linux/ptrace.h>
  22#include <linux/uaccess.h>
  23#include <trace/events/sched.h>
  24
  25static DEFINE_SPINLOCK(kthread_create_lock);
  26static LIST_HEAD(kthread_create_list);
  27struct task_struct *kthreadd_task;
  28
  29struct kthread_create_info
  30{
  31	/* Information passed to kthread() from kthreadd. */
  32	int (*threadfn)(void *data);
  33	void *data;
  34	int node;
  35
  36	/* Result passed back to kthread_create() from kthreadd. */
  37	struct task_struct *result;
  38	struct completion *done;
  39
  40	struct list_head list;
  41};
  42
  43struct kthread {
  44	unsigned long flags;
  45	unsigned int cpu;
  46	void *data;
  47	struct completion parked;
  48	struct completion exited;
  49#ifdef CONFIG_BLK_CGROUP
  50	struct cgroup_subsys_state *blkcg_css;
  51#endif
  52};
  53
  54enum KTHREAD_BITS {
  55	KTHREAD_IS_PER_CPU = 0,
  56	KTHREAD_SHOULD_STOP,
  57	KTHREAD_SHOULD_PARK,
  58};
  59
  60static inline void set_kthread_struct(void *kthread)
  61{
  62	/*
  63	 * We abuse ->set_child_tid to avoid the new member and because it
  64	 * can't be wrongly copied by copy_process(). We also rely on fact
  65	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
  66	 */
  67	current->set_child_tid = (__force void __user *)kthread;
  68}
  69
  70static inline struct kthread *to_kthread(struct task_struct *k)
  71{
  72	WARN_ON(!(k->flags & PF_KTHREAD));
  73	return (__force void *)k->set_child_tid;
  74}
  75
  76void free_kthread_struct(struct task_struct *k)
  77{
  78	struct kthread *kthread;
  79
  80	/*
  81	 * Can be NULL if this kthread was created by kernel_thread()
  82	 * or if kmalloc() in kthread() failed.
  83	 */
  84	kthread = to_kthread(k);
  85#ifdef CONFIG_BLK_CGROUP
  86	WARN_ON_ONCE(kthread && kthread->blkcg_css);
  87#endif
  88	kfree(kthread);
  89}
  90
  91/**
  92 * kthread_should_stop - should this kthread return now?
  93 *
  94 * When someone calls kthread_stop() on your kthread, it will be woken
  95 * and this will return true.  You should then return, and your return
  96 * value will be passed through to kthread_stop().
  97 */
  98bool kthread_should_stop(void)
  99{
 100	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 101}
 102EXPORT_SYMBOL(kthread_should_stop);
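
A minimal, hypothetical usage sketch (not part of kthread.c): a thread function polls kthread_should_stop() in its main loop and returns a value that kthread_stop() will later report. Names such as my_thread_fn are illustrative only.

#include <linux/kthread.h>
#include <linux/delay.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work on 'data' here ... */
		msleep_interruptible(100);
	}
	return 0;	/* passed back through kthread_stop() */
}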
 103
 104/**
 105 * kthread_should_park - should this kthread park now?
 106 *
 107 * When someone calls kthread_park() on your kthread, it will be woken
 108 * and this will return true.  You should then do the necessary
  109 * cleanup and call kthread_parkme().
 110 *
 111 * Similar to kthread_should_stop(), but this keeps the thread alive
 112 * and in a park position. kthread_unpark() "restarts" the thread and
 113 * calls the thread function again.
 114 */
 115bool kthread_should_park(void)
 116{
 117	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 118}
 119EXPORT_SYMBOL_GPL(kthread_should_park);
 120
 121/**
 122 * kthread_freezable_should_stop - should this freezable kthread return now?
 123 * @was_frozen: optional out parameter, indicates whether %current was frozen
 124 *
 125 * kthread_should_stop() for freezable kthreads, which will enter
 126 * refrigerator if necessary.  This function is safe from kthread_stop() /
 127 * freezer deadlock and freezable kthreads should use this function instead
 128 * of calling try_to_freeze() directly.
 129 */
 130bool kthread_freezable_should_stop(bool *was_frozen)
 131{
 132	bool frozen = false;
 133
 134	might_sleep();
 135
 136	if (unlikely(freezing(current)))
 137		frozen = __refrigerator(true);
 138
 139	if (was_frozen)
 140		*was_frozen = frozen;
 141
 142	return kthread_should_stop();
 143}
 144EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
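
As a hedged illustration (hypothetical names, not part of this file): a freezable kthread opts in with set_freezable() and then uses kthread_freezable_should_stop() as its loop condition, so it enters the refrigerator during suspend instead of blocking it.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/delay.h>

static int my_freezable_thread(void *data)
{
	bool was_frozen;

	set_freezable();	/* allow the freezer to freeze this kthread */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("thawed after suspend, refreshing state\n");
		/* periodic work ... */
		msleep_interruptible(1000);
	}
	return 0;
}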
 145
 146/**
 147 * kthread_data - return data value specified on kthread creation
 148 * @task: kthread task in question
 149 *
 150 * Return the data value specified when kthread @task was created.
 151 * The caller is responsible for ensuring the validity of @task when
 152 * calling this function.
 153 */
 154void *kthread_data(struct task_struct *task)
 155{
 156	return to_kthread(task)->data;
 157}
 158
 159/**
 160 * kthread_probe_data - speculative version of kthread_data()
 161 * @task: possible kthread task in question
 162 *
 163 * @task could be a kthread task.  Return the data value specified when it
 164 * was created if accessible.  If @task isn't a kthread task or its data is
 165 * inaccessible for any reason, %NULL is returned.  This function requires
 166 * that @task itself is safe to dereference.
 167 */
 168void *kthread_probe_data(struct task_struct *task)
 169{
 170	struct kthread *kthread = to_kthread(task);
 171	void *data = NULL;
 172
 173	probe_kernel_read(&data, &kthread->data, sizeof(data));
 174	return data;
 175}
 176
 177static void __kthread_parkme(struct kthread *self)
 178{
 179	for (;;) {
 180		set_current_state(TASK_PARKED);
 181		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 182			break;
 183		schedule();
 184	}
 185	__set_current_state(TASK_RUNNING);
 186}
 187
 188void kthread_parkme(void)
 189{
 190	__kthread_parkme(to_kthread(current));
 191}
 192EXPORT_SYMBOL_GPL(kthread_parkme);
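
A thread that supports parking typically checks kthread_should_park() and calls kthread_parkme() from its own loop, much like the smpboot per-CPU threads do. A rough sketch with hypothetical names:

#include <linux/kthread.h>
#include <linux/delay.h>

static int my_parkable_thread(void *data)
{
	for (;;) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps until kthread_unpark() */
		if (kthread_should_stop())
			break;
		/* one batch of work ... */
		msleep_interruptible(100);
	}
	return 0;
}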
 193
 194void kthread_park_complete(struct task_struct *k)
 195{
 196	complete_all(&to_kthread(k)->parked);
 197}
 198
 199static int kthread(void *_create)
 200{
 201	/* Copy data: it's on kthread's stack */
 202	struct kthread_create_info *create = _create;
 203	int (*threadfn)(void *data) = create->threadfn;
 204	void *data = create->data;
 205	struct completion *done;
 206	struct kthread *self;
 207	int ret;
 208
 209	self = kzalloc(sizeof(*self), GFP_KERNEL);
 210	set_kthread_struct(self);
 211
 212	/* If user was SIGKILLed, I release the structure. */
 213	done = xchg(&create->done, NULL);
 214	if (!done) {
 215		kfree(create);
 216		do_exit(-EINTR);
 217	}
 218
 219	if (!self) {
 220		create->result = ERR_PTR(-ENOMEM);
 221		complete(done);
 222		do_exit(-ENOMEM);
 223	}
 224
 225	self->data = data;
 226	init_completion(&self->exited);
 227	init_completion(&self->parked);
 228	current->vfork_done = &self->exited;
 229
 230	/* OK, tell user we're spawned, wait for stop or wakeup */
 231	__set_current_state(TASK_UNINTERRUPTIBLE);
 232	create->result = current;
 233	complete(done);
 234	schedule();
 235
 236	ret = -EINTR;
 237	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 238		cgroup_kthread_ready();
 239		__kthread_parkme(self);
 240		ret = threadfn(data);
 241	}
 242	do_exit(ret);
 243}
 244
  245/* called from do_fork() to get node information for the task about to be created */
 246int tsk_fork_get_node(struct task_struct *tsk)
 247{
 248#ifdef CONFIG_NUMA
 249	if (tsk == kthreadd_task)
 250		return tsk->pref_node_fork;
 251#endif
 252	return NUMA_NO_NODE;
 253}
 254
 255static void create_kthread(struct kthread_create_info *create)
 256{
 257	int pid;
 258
 259#ifdef CONFIG_NUMA
 260	current->pref_node_fork = create->node;
 261#endif
 262	/* We want our own signal handler (we take no signals by default). */
 263	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
 264	if (pid < 0) {
 265		/* If user was SIGKILLed, I release the structure. */
 266		struct completion *done = xchg(&create->done, NULL);
 267
 268		if (!done) {
 269			kfree(create);
 270			return;
 271		}
 272		create->result = ERR_PTR(pid);
 273		complete(done);
 274	}
 275}
 276
 277static __printf(4, 0)
 278struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 279						    void *data, int node,
 280						    const char namefmt[],
 281						    va_list args)
 282{
 283	DECLARE_COMPLETION_ONSTACK(done);
 284	struct task_struct *task;
 285	struct kthread_create_info *create = kmalloc(sizeof(*create),
 286						     GFP_KERNEL);
 287
 288	if (!create)
 289		return ERR_PTR(-ENOMEM);
 290	create->threadfn = threadfn;
 291	create->data = data;
 292	create->node = node;
 293	create->done = &done;
 294
 295	spin_lock(&kthread_create_lock);
 296	list_add_tail(&create->list, &kthread_create_list);
 297	spin_unlock(&kthread_create_lock);
 298
 299	wake_up_process(kthreadd_task);
 300	/*
 301	 * Wait for completion in killable state, for I might be chosen by
 302	 * the OOM killer while kthreadd is trying to allocate memory for
 303	 * new kernel thread.
 304	 */
 305	if (unlikely(wait_for_completion_killable(&done))) {
 306		/*
 307		 * If I was SIGKILLed before kthreadd (or new kernel thread)
 308		 * calls complete(), leave the cleanup of this structure to
 309		 * that thread.
 310		 */
 311		if (xchg(&create->done, NULL))
 312			return ERR_PTR(-EINTR);
 313		/*
 314		 * kthreadd (or new kernel thread) will call complete()
 315		 * shortly.
 316		 */
 317		wait_for_completion(&done);
 318	}
 319	task = create->result;
 320	if (!IS_ERR(task)) {
 321		static const struct sched_param param = { .sched_priority = 0 };
 322
 323		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
 324		/*
 325		 * root may have changed our (kthreadd's) priority or CPU mask.
 326		 * The kernel thread should not inherit these properties.
 327		 */
 328		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
 329		set_cpus_allowed_ptr(task, cpu_all_mask);
 330	}
 331	kfree(create);
 332	return task;
 333}
 334
 335/**
 336 * kthread_create_on_node - create a kthread.
 337 * @threadfn: the function to run until signal_pending(current).
 338 * @data: data ptr for @threadfn.
 339 * @node: task and thread structures for the thread are allocated on this node
 340 * @namefmt: printf-style name for the thread.
 341 *
 342 * Description: This helper function creates and names a kernel
 343 * thread.  The thread will be stopped: use wake_up_process() to start
 344 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 345 * is affine to all CPUs.
 346 *
  347 * If the thread is going to be bound to a particular cpu, give its node
 348 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 349 * When woken, the thread will run @threadfn() with @data as its
 350 * argument. @threadfn() can either call do_exit() directly if it is a
 351 * standalone thread for which no one will call kthread_stop(), or
 352 * return when 'kthread_should_stop()' is true (which means
 353 * kthread_stop() has been called).  The return value should be zero
 354 * or a negative error number; it will be passed to kthread_stop().
 355 *
 356 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 357 */
 358struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 359					   void *data, int node,
 360					   const char namefmt[],
 361					   ...)
 362{
 363	struct task_struct *task;
 364	va_list args;
 365
 366	va_start(args, namefmt);
 367	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 368	va_end(args);
 369
 370	return task;
 371}
 372EXPORT_SYMBOL(kthread_create_on_node);
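
Putting the pieces together, a caller typically creates the thread (it starts stopped), wakes it with wake_up_process(), and eventually tears it down with kthread_stop(). The sketch below is illustrative only; my_thread_fn, my_task and the thread name are hypothetical.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct task_struct *my_task;

static int my_start(void *cookie)
{
	my_task = kthread_create_on_node(my_thread_fn, cookie, NUMA_NO_NODE,
					 "my-worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	wake_up_process(my_task);	/* the new thread is created stopped */
	return 0;
}

static void my_finish(void)
{
	int ret = kthread_stop(my_task);	/* returns my_thread_fn()'s value */

	pr_info("worker exited with %d\n", ret);
}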
 373
 374static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 375{
 376	unsigned long flags;
 377
 378	if (!wait_task_inactive(p, state)) {
 379		WARN_ON(1);
 380		return;
 381	}
 382
 383	/* It's safe because the task is inactive. */
 384	raw_spin_lock_irqsave(&p->pi_lock, flags);
 385	do_set_cpus_allowed(p, mask);
 386	p->flags |= PF_NO_SETAFFINITY;
 387	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 388}
 389
 390static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 391{
 392	__kthread_bind_mask(p, cpumask_of(cpu), state);
 393}
 394
 395void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 396{
 397	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 398}
 399
 400/**
 401 * kthread_bind - bind a just-created kthread to a cpu.
 402 * @p: thread created by kthread_create().
 403 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 404 *
 405 * Description: This function is equivalent to set_cpus_allowed(),
 406 * except that @cpu doesn't need to be online, and the thread must be
 407 * stopped (i.e., just returned from kthread_create()).
 408 */
 409void kthread_bind(struct task_struct *p, unsigned int cpu)
 410{
 411	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 412}
 413EXPORT_SYMBOL(kthread_bind);
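
kthread_bind() must run before the new thread has ever been woken, i.e. right after kthread_create(). A hedged sketch with hypothetical names:

static struct task_struct *start_pinned(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(my_thread_fn, NULL, "my-helper/%u", cpu);
	if (IS_ERR(t))
		return t;
	kthread_bind(t, cpu);	/* pin to 'cpu' before the first wakeup */
	wake_up_process(t);
	return t;
}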
 414
 415/**
 416 * kthread_create_on_cpu - Create a cpu bound kthread
 417 * @threadfn: the function to run until signal_pending(current).
 418 * @data: data ptr for @threadfn.
 419 * @cpu: The cpu on which the thread should be bound,
 420 * @namefmt: printf-style name for the thread. Format is restricted
 421 *	     to "name.*%u". Code fills in cpu number.
 422 *
  423 * Description: This helper function creates and names a kernel thread.
 424 * The thread will be woken and put into park mode.
 425 */
 426struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 427					  void *data, unsigned int cpu,
 428					  const char *namefmt)
 429{
 430	struct task_struct *p;
 431
 432	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 433				   cpu);
 434	if (IS_ERR(p))
 435		return p;
 436	kthread_bind(p, cpu);
 437	/* CPU hotplug need to bind once again when unparking the thread. */
 438	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
 439	to_kthread(p)->cpu = cpu;
 440	return p;
 441}
 442
 443/**
 444 * kthread_unpark - unpark a thread created by kthread_create().
 445 * @k:		thread created by kthread_create().
 446 *
 447 * Sets kthread_should_park() for @k to return false, wakes it, and
 448 * waits for it to return. If the thread is marked percpu then its
 449 * bound to the cpu again.
 450 */
 451void kthread_unpark(struct task_struct *k)
 452{
 453	struct kthread *kthread = to_kthread(k);
 454
 455	/*
 456	 * Newly created kthread was parked when the CPU was offline.
 457	 * The binding was lost and we need to set it again.
 458	 */
 459	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 460		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 461
 462	reinit_completion(&kthread->parked);
 463	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 464	wake_up_state(k, TASK_PARKED);
 465}
 466EXPORT_SYMBOL_GPL(kthread_unpark);
 467
 468/**
 469 * kthread_park - park a thread created by kthread_create().
 470 * @k: thread created by kthread_create().
 471 *
 472 * Sets kthread_should_park() for @k to return true, wakes it, and
 473 * waits for it to return. This can also be called after kthread_create()
 474 * instead of calling wake_up_process(): the thread will park without
 475 * calling threadfn().
 476 *
 477 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 478 * If called by the kthread itself just the park bit is set.
 479 */
 480int kthread_park(struct task_struct *k)
 481{
 482	struct kthread *kthread = to_kthread(k);
 483
 484	if (WARN_ON(k->flags & PF_EXITING))
 485		return -ENOSYS;
 486
 487	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 488	if (k != current) {
 489		wake_up_process(k);
 490		wait_for_completion(&kthread->parked);
 491	}
 492
 493	return 0;
 494}
 495EXPORT_SYMBOL_GPL(kthread_park);
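
The park/unpark pair lets a caller quiesce a thread without destroying it (CPU hotplug uses it this way for per-CPU threads). A hypothetical sketch:

static int my_quiesce(struct task_struct *t)
{
	return kthread_park(t);	/* 0 on success, -ENOSYS if t already exited */
}

static void my_resume(struct task_struct *t)
{
	kthread_unpark(t);	/* thread returns from kthread_parkme() */
}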
 496
 497/**
 498 * kthread_stop - stop a thread created by kthread_create().
 499 * @k: thread created by kthread_create().
 500 *
 501 * Sets kthread_should_stop() for @k to return true, wakes it, and
 502 * waits for it to exit. This can also be called after kthread_create()
 503 * instead of calling wake_up_process(): the thread will exit without
 504 * calling threadfn().
 505 *
 506 * If threadfn() may call do_exit() itself, the caller must ensure
 507 * task_struct can't go away.
 508 *
 509 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 510 * was never called.
 511 */
 512int kthread_stop(struct task_struct *k)
 513{
 514	struct kthread *kthread;
 515	int ret;
 516
 517	trace_sched_kthread_stop(k);
 518
 519	get_task_struct(k);
 520	kthread = to_kthread(k);
 521	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 522	kthread_unpark(k);
 523	wake_up_process(k);
 524	wait_for_completion(&kthread->exited);
 525	ret = k->exit_code;
 526	put_task_struct(k);
 527
 528	trace_sched_kthread_stop_ret(ret);
 529	return ret;
 530}
 531EXPORT_SYMBOL(kthread_stop);
 532
 533int kthreadd(void *unused)
 534{
 535	struct task_struct *tsk = current;
 536
 537	/* Setup a clean context for our children to inherit. */
 538	set_task_comm(tsk, "kthreadd");
 539	ignore_signals(tsk);
 540	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 541	set_mems_allowed(node_states[N_MEMORY]);
 542
 543	current->flags |= PF_NOFREEZE;
 544	cgroup_init_kthreadd();
 545
 546	for (;;) {
 547		set_current_state(TASK_INTERRUPTIBLE);
 548		if (list_empty(&kthread_create_list))
 549			schedule();
 550		__set_current_state(TASK_RUNNING);
 551
 552		spin_lock(&kthread_create_lock);
 553		while (!list_empty(&kthread_create_list)) {
 554			struct kthread_create_info *create;
 555
 556			create = list_entry(kthread_create_list.next,
 557					    struct kthread_create_info, list);
 558			list_del_init(&create->list);
 559			spin_unlock(&kthread_create_lock);
 560
 561			create_kthread(create);
 562
 563			spin_lock(&kthread_create_lock);
 564		}
 565		spin_unlock(&kthread_create_lock);
 566	}
 567
 568	return 0;
 569}
 570
 571void __kthread_init_worker(struct kthread_worker *worker,
 572				const char *name,
 573				struct lock_class_key *key)
 574{
 575	memset(worker, 0, sizeof(struct kthread_worker));
 576	spin_lock_init(&worker->lock);
 577	lockdep_set_class_and_name(&worker->lock, key, name);
 578	INIT_LIST_HEAD(&worker->work_list);
 579	INIT_LIST_HEAD(&worker->delayed_work_list);
 580}
 581EXPORT_SYMBOL_GPL(__kthread_init_worker);
 582
 583/**
 584 * kthread_worker_fn - kthread function to process kthread_worker
 585 * @worker_ptr: pointer to initialized kthread_worker
 586 *
 587 * This function implements the main cycle of kthread worker. It processes
 588 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 589 * is empty.
 590 *
  591 * The works must not hold any locks or leave preemption or interrupts
  592 * disabled when they finish. A safe point for freezing is provided after one
  593 * work finishes and before a new one is started.
 594 *
  595 * Also, a work must not be handled by more than one worker at the same time;
  596 * see also kthread_queue_work().
 597 */
 598int kthread_worker_fn(void *worker_ptr)
 599{
 600	struct kthread_worker *worker = worker_ptr;
 601	struct kthread_work *work;
 602
 603	/*
 604	 * FIXME: Update the check and remove the assignment when all kthread
 605	 * worker users are created using kthread_create_worker*() functions.
 606	 */
 607	WARN_ON(worker->task && worker->task != current);
 608	worker->task = current;
 609
 610	if (worker->flags & KTW_FREEZABLE)
 611		set_freezable();
 612
 613repeat:
 614	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 615
 616	if (kthread_should_stop()) {
 617		__set_current_state(TASK_RUNNING);
 618		spin_lock_irq(&worker->lock);
 619		worker->task = NULL;
 620		spin_unlock_irq(&worker->lock);
 621		return 0;
 622	}
 623
 624	work = NULL;
 625	spin_lock_irq(&worker->lock);
 626	if (!list_empty(&worker->work_list)) {
 627		work = list_first_entry(&worker->work_list,
 628					struct kthread_work, node);
 629		list_del_init(&work->node);
 630	}
 631	worker->current_work = work;
 632	spin_unlock_irq(&worker->lock);
 633
 634	if (work) {
 635		__set_current_state(TASK_RUNNING);
 636		work->func(work);
 637	} else if (!freezing(current))
 638		schedule();
 639
 640	try_to_freeze();
 641	cond_resched();
 642	goto repeat;
 643}
 644EXPORT_SYMBOL_GPL(kthread_worker_fn);
 645
 646static __printf(3, 0) struct kthread_worker *
 647__kthread_create_worker(int cpu, unsigned int flags,
 648			const char namefmt[], va_list args)
 649{
 650	struct kthread_worker *worker;
 651	struct task_struct *task;
 652	int node = -1;
 653
 654	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 655	if (!worker)
 656		return ERR_PTR(-ENOMEM);
 657
 658	kthread_init_worker(worker);
 659
 660	if (cpu >= 0)
 661		node = cpu_to_node(cpu);
 662
 663	task = __kthread_create_on_node(kthread_worker_fn, worker,
 664						node, namefmt, args);
 665	if (IS_ERR(task))
 666		goto fail_task;
 667
 668	if (cpu >= 0)
 669		kthread_bind(task, cpu);
 670
 671	worker->flags = flags;
 672	worker->task = task;
 673	wake_up_process(task);
 674	return worker;
 675
 676fail_task:
 677	kfree(worker);
 678	return ERR_CAST(task);
 679}
 680
 681/**
 682 * kthread_create_worker - create a kthread worker
 683 * @flags: flags modifying the default behavior of the worker
 684 * @namefmt: printf-style name for the kthread worker (task).
 685 *
 686 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 687 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 688 * when the worker was SIGKILLed.
 689 */
 690struct kthread_worker *
 691kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 692{
 693	struct kthread_worker *worker;
 694	va_list args;
 695
 696	va_start(args, namefmt);
 697	worker = __kthread_create_worker(-1, flags, namefmt, args);
 698	va_end(args);
 699
 700	return worker;
 701}
 702EXPORT_SYMBOL(kthread_create_worker);
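
A hedged usage sketch for the worker API (names are hypothetical): define a work item with DEFINE_KTHREAD_WORK(), create a worker, and queue the work; the worker must later be torn down with kthread_destroy_worker().

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static void my_work_fn(struct kthread_work *work)
{
	/* runs in the worker's kthread context */
	pr_info("work executed\n");
}

static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
static struct kthread_worker *my_worker;

static int my_setup(void)
{
	my_worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);
	kthread_queue_work(my_worker, &my_work);
	return 0;
}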
 703
 704/**
 705 * kthread_create_worker_on_cpu - create a kthread worker and bind it
  706 *	to a given CPU and the associated NUMA node.
 707 * @cpu: CPU number
 708 * @flags: flags modifying the default behavior of the worker
 709 * @namefmt: printf-style name for the kthread worker (task).
 710 *
 711 * Use a valid CPU number if you want to bind the kthread worker
 712 * to the given CPU and the associated NUMA node.
 713 *
  714 * It is good practice to also include the cpu number in the worker name.
 715 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 716 *
 717 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 718 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 719 * when the worker was SIGKILLed.
 720 */
 721struct kthread_worker *
 722kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 723			     const char namefmt[], ...)
 724{
 725	struct kthread_worker *worker;
 726	va_list args;
 727
 728	va_start(args, namefmt);
 729	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 730	va_end(args);
 731
 732	return worker;
 733}
 734EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 735
 736/*
 737 * Returns true when the work could not be queued at the moment.
 738 * It happens when it is already pending in a worker list
 739 * or when it is being cancelled.
 740 */
 741static inline bool queuing_blocked(struct kthread_worker *worker,
 742				   struct kthread_work *work)
 743{
 744	lockdep_assert_held(&worker->lock);
 745
 746	return !list_empty(&work->node) || work->canceling;
 747}
 748
 749static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 750					     struct kthread_work *work)
 751{
 752	lockdep_assert_held(&worker->lock);
 753	WARN_ON_ONCE(!list_empty(&work->node));
 754	/* Do not use a work with >1 worker, see kthread_queue_work() */
 755	WARN_ON_ONCE(work->worker && work->worker != worker);
 756}
 757
 758/* insert @work before @pos in @worker */
 759static void kthread_insert_work(struct kthread_worker *worker,
 760				struct kthread_work *work,
 761				struct list_head *pos)
 762{
 763	kthread_insert_work_sanity_check(worker, work);
 764
 765	list_add_tail(&work->node, pos);
 766	work->worker = worker;
 767	if (!worker->current_work && likely(worker->task))
 768		wake_up_process(worker->task);
 769}
 770
 771/**
 772 * kthread_queue_work - queue a kthread_work
 773 * @worker: target kthread_worker
 774 * @work: kthread_work to queue
 775 *
  776 * Queue @work to the work processor @worker for async execution.  @worker
  777 * must have been created with kthread_create_worker().  Returns %true
 778 * if @work was successfully queued, %false if it was already pending.
 779 *
 780 * Reinitialize the work if it needs to be used by another worker.
 781 * For example, when the worker was stopped and started again.
 782 */
 783bool kthread_queue_work(struct kthread_worker *worker,
 784			struct kthread_work *work)
 785{
 786	bool ret = false;
 787	unsigned long flags;
 788
 789	spin_lock_irqsave(&worker->lock, flags);
 790	if (!queuing_blocked(worker, work)) {
 791		kthread_insert_work(worker, work, &worker->work_list);
 792		ret = true;
 793	}
 794	spin_unlock_irqrestore(&worker->lock, flags);
 795	return ret;
 796}
 797EXPORT_SYMBOL_GPL(kthread_queue_work);
 798
 799/**
 800 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 801 *	delayed work when the timer expires.
 802 * @t: pointer to the expired timer
 803 *
 804 * The format of the function is defined by struct timer_list.
  805 * It is called from an irq-safe timer with interrupts already disabled.
 806 */
 807void kthread_delayed_work_timer_fn(struct timer_list *t)
 808{
 809	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 810	struct kthread_work *work = &dwork->work;
 811	struct kthread_worker *worker = work->worker;
 812
 813	/*
 814	 * This might happen when a pending work is reinitialized.
  815 * It means that the work is being used in a wrong way.
 816	 */
 817	if (WARN_ON_ONCE(!worker))
 818		return;
 819
 820	spin_lock(&worker->lock);
 821	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 822	WARN_ON_ONCE(work->worker != worker);
 823
 824	/* Move the work from worker->delayed_work_list. */
 825	WARN_ON_ONCE(list_empty(&work->node));
 826	list_del_init(&work->node);
 827	kthread_insert_work(worker, work, &worker->work_list);
 828
 829	spin_unlock(&worker->lock);
 830}
 831EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 832
 833void __kthread_queue_delayed_work(struct kthread_worker *worker,
 834				  struct kthread_delayed_work *dwork,
 835				  unsigned long delay)
 836{
 837	struct timer_list *timer = &dwork->timer;
 838	struct kthread_work *work = &dwork->work;
 839
 840	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 841
 842	/*
 843	 * If @delay is 0, queue @dwork->work immediately.  This is for
 844	 * both optimization and correctness.  The earliest @timer can
 845	 * expire is on the closest next tick and delayed_work users depend
 846	 * on that there's no such delay when @delay is 0.
 847	 */
 848	if (!delay) {
 849		kthread_insert_work(worker, work, &worker->work_list);
 850		return;
 851	}
 852
 853	/* Be paranoid and try to detect possible races already now. */
 854	kthread_insert_work_sanity_check(worker, work);
 855
 856	list_add(&work->node, &worker->delayed_work_list);
 857	work->worker = worker;
 858	timer->expires = jiffies + delay;
 859	add_timer(timer);
 860}
 861
 862/**
 863 * kthread_queue_delayed_work - queue the associated kthread work
 864 *	after a delay.
 865 * @worker: target kthread_worker
 866 * @dwork: kthread_delayed_work to queue
 867 * @delay: number of jiffies to wait before queuing
 868 *
 869 * If the work has not been pending it starts a timer that will queue
 870 * the work after the given @delay. If @delay is zero, it queues the
 871 * work immediately.
 872 *
  873 * Return: %false if the @work was already pending, i.e. its timer was
  874 * running or the work was already queued on the worker list. Returns %true
  875 * otherwise.
 876 */
 877bool kthread_queue_delayed_work(struct kthread_worker *worker,
 878				struct kthread_delayed_work *dwork,
 879				unsigned long delay)
 880{
 881	struct kthread_work *work = &dwork->work;
 882	unsigned long flags;
 883	bool ret = false;
 884
 885	spin_lock_irqsave(&worker->lock, flags);
 886
 887	if (!queuing_blocked(worker, work)) {
 888		__kthread_queue_delayed_work(worker, dwork, delay);
 889		ret = true;
 890	}
 891
 892	spin_unlock_irqrestore(&worker->lock, flags);
 893	return ret;
 894}
 895EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
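
Delayed works need their timer wired to kthread_delayed_work_timer_fn(), which DEFINE_KTHREAD_DELAYED_WORK() (or kthread_init_delayed_work()) takes care of. A hypothetical sketch building on the worker declared above:

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void my_timeout_fn(struct kthread_work *work)
{
	pr_info("ran about one second after being queued\n");
}

static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_timeout_fn);

static void my_arm_timeout(struct kthread_worker *worker)
{
	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(1000));
}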
 896
 897struct kthread_flush_work {
 898	struct kthread_work	work;
 899	struct completion	done;
 900};
 901
 902static void kthread_flush_work_fn(struct kthread_work *work)
 903{
 904	struct kthread_flush_work *fwork =
 905		container_of(work, struct kthread_flush_work, work);
 906	complete(&fwork->done);
 907}
 908
 909/**
 910 * kthread_flush_work - flush a kthread_work
 911 * @work: work to flush
 912 *
 913 * If @work is queued or executing, wait for it to finish execution.
 914 */
 915void kthread_flush_work(struct kthread_work *work)
 916{
 917	struct kthread_flush_work fwork = {
 918		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 919		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 920	};
 921	struct kthread_worker *worker;
 922	bool noop = false;
 923
 924	worker = work->worker;
 925	if (!worker)
 926		return;
 927
 928	spin_lock_irq(&worker->lock);
 929	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 930	WARN_ON_ONCE(work->worker != worker);
 931
 932	if (!list_empty(&work->node))
 933		kthread_insert_work(worker, &fwork.work, work->node.next);
 934	else if (worker->current_work == work)
 935		kthread_insert_work(worker, &fwork.work,
 936				    worker->work_list.next);
 937	else
 938		noop = true;
 939
 940	spin_unlock_irq(&worker->lock);
 941
 942	if (!noop)
 943		wait_for_completion(&fwork.done);
 944}
 945EXPORT_SYMBOL_GPL(kthread_flush_work);
 946
 947/*
 948 * This function removes the work from the worker queue. Also it makes sure
 949 * that it won't get queued later via the delayed work's timer.
 950 *
 951 * The work might still be in use when this function finishes. See the
  952 * current_work processed by the worker.
 953 *
 954 * Return: %true if @work was pending and successfully canceled,
 955 *	%false if @work was not pending
 956 */
 957static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 958				  unsigned long *flags)
 959{
 960	/* Try to cancel the timer if exists. */
 961	if (is_dwork) {
 962		struct kthread_delayed_work *dwork =
 963			container_of(work, struct kthread_delayed_work, work);
 964		struct kthread_worker *worker = work->worker;
 965
 966		/*
 967		 * del_timer_sync() must be called to make sure that the timer
  968 * callback is not running. The lock must be temporarily released
 969		 * to avoid a deadlock with the callback. In the meantime,
 970		 * any queuing is blocked by setting the canceling counter.
 971		 */
 972		work->canceling++;
 973		spin_unlock_irqrestore(&worker->lock, *flags);
 974		del_timer_sync(&dwork->timer);
 975		spin_lock_irqsave(&worker->lock, *flags);
 976		work->canceling--;
 977	}
 978
 979	/*
 980	 * Try to remove the work from a worker list. It might either
 981	 * be from worker->work_list or from worker->delayed_work_list.
 982	 */
 983	if (!list_empty(&work->node)) {
 984		list_del_init(&work->node);
 985		return true;
 986	}
 987
 988	return false;
 989}
 990
 991/**
 992 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 993 * @worker: kthread worker to use
 994 * @dwork: kthread delayed work to queue
 995 * @delay: number of jiffies to wait before queuing
 996 *
 997 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 998 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 999 * @work is guaranteed to be queued immediately.
1000 *
1001 * Return: %true if @dwork was pending and its timer was modified,
1002 * %false otherwise.
1003 *
1004 * A special case is when the work is being canceled in parallel.
1005 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1006 * or yet another kthread_mod_delayed_work() call. We let the other command
1007 * win and return %false here. The caller is supposed to synchronize these
 1008 * operations in a reasonable way.
1009 *
1010 * This function is safe to call from any context including IRQ handler.
1011 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1012 * for details.
1013 */
1014bool kthread_mod_delayed_work(struct kthread_worker *worker,
1015			      struct kthread_delayed_work *dwork,
1016			      unsigned long delay)
1017{
1018	struct kthread_work *work = &dwork->work;
1019	unsigned long flags;
1020	int ret = false;
1021
1022	spin_lock_irqsave(&worker->lock, flags);
1023
1024	/* Do not bother with canceling when never queued. */
1025	if (!work->worker)
1026		goto fast_queue;
1027
1028	/* Work must not be used with >1 worker, see kthread_queue_work() */
1029	WARN_ON_ONCE(work->worker != worker);
1030
1031	/* Do not fight with another command that is canceling this work. */
1032	if (work->canceling)
1033		goto out;
1034
1035	ret = __kthread_cancel_work(work, true, &flags);
1036fast_queue:
1037	__kthread_queue_delayed_work(worker, dwork, delay);
1038out:
1039	spin_unlock_irqrestore(&worker->lock, flags);
1040	return ret;
1041}
1042EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
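
A common pattern is debouncing: every event pushes the deadline out, so the handler runs once after activity stops. A hypothetical sketch reusing my_dwork from the sketch above:

static void my_event(struct kthread_worker *worker)
{
	/* queue if idle, otherwise push the timer out to 200ms from now */
	kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(200));
}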
1043
1044static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1045{
1046	struct kthread_worker *worker = work->worker;
1047	unsigned long flags;
1048	int ret = false;
1049
1050	if (!worker)
1051		goto out;
1052
1053	spin_lock_irqsave(&worker->lock, flags);
1054	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1055	WARN_ON_ONCE(work->worker != worker);
1056
1057	ret = __kthread_cancel_work(work, is_dwork, &flags);
1058
1059	if (worker->current_work != work)
1060		goto out_fast;
1061
1062	/*
1063	 * The work is in progress and we need to wait with the lock released.
1064	 * In the meantime, block any queuing by setting the canceling counter.
1065	 */
1066	work->canceling++;
1067	spin_unlock_irqrestore(&worker->lock, flags);
1068	kthread_flush_work(work);
1069	spin_lock_irqsave(&worker->lock, flags);
1070	work->canceling--;
1071
1072out_fast:
1073	spin_unlock_irqrestore(&worker->lock, flags);
1074out:
1075	return ret;
1076}
1077
1078/**
1079 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1080 * @work: the kthread work to cancel
1081 *
1082 * Cancel @work and wait for its execution to finish.  This function
1083 * can be used even if the work re-queues itself. On return from this
1084 * function, @work is guaranteed to be not pending or executing on any CPU.
1085 *
1086 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1087 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1088 *
1089 * The caller must ensure that the worker on which @work was last
1090 * queued can't be destroyed before this function returns.
1091 *
1092 * Return: %true if @work was pending, %false otherwise.
1093 */
1094bool kthread_cancel_work_sync(struct kthread_work *work)
1095{
1096	return __kthread_cancel_work_sync(work, false);
1097}
1098EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1099
1100/**
1101 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1102 *	wait for it to finish.
1103 * @dwork: the kthread delayed work to cancel
1104 *
1105 * This is kthread_cancel_work_sync() for delayed works.
1106 *
1107 * Return: %true if @dwork was pending, %false otherwise.
1108 */
1109bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1110{
1111	return __kthread_cancel_work_sync(&dwork->work, true);
1112}
1113EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1114
1115/**
1116 * kthread_flush_worker - flush all current works on a kthread_worker
1117 * @worker: worker to flush
1118 *
1119 * Wait until all currently executing or pending works on @worker are
1120 * finished.
1121 */
1122void kthread_flush_worker(struct kthread_worker *worker)
1123{
1124	struct kthread_flush_work fwork = {
1125		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1126		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1127	};
1128
1129	kthread_queue_work(worker, &fwork.work);
1130	wait_for_completion(&fwork.done);
1131}
1132EXPORT_SYMBOL_GPL(kthread_flush_worker);
1133
1134/**
1135 * kthread_destroy_worker - destroy a kthread worker
1136 * @worker: worker to be destroyed
1137 *
1138 * Flush and destroy @worker.  The simple flush is enough because the kthread
1139 * worker API is used only in trivial scenarios.  There are no multi-step state
1140 * machines needed.
1141 */
1142void kthread_destroy_worker(struct kthread_worker *worker)
1143{
1144	struct task_struct *task;
1145
1146	task = worker->task;
1147	if (WARN_ON(!task))
1148		return;
1149
1150	kthread_flush_worker(worker);
1151	kthread_stop(task);
1152	WARN_ON(!list_empty(&worker->work_list));
1153	kfree(worker);
1154}
1155EXPORT_SYMBOL(kthread_destroy_worker);
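
Teardown mirrors setup: cancel any outstanding works synchronously, then destroy the worker, which stops its kthread and frees the structure. A hypothetical sketch for the worker and works declared in the earlier sketches:

static void my_teardown(void)
{
	kthread_cancel_delayed_work_sync(&my_dwork);	/* also stops the timer */
	kthread_cancel_work_sync(&my_work);
	kthread_destroy_worker(my_worker);	/* flushes, stops the kthread, frees it */
}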
1156
1157#ifdef CONFIG_BLK_CGROUP
1158/**
1159 * kthread_associate_blkcg - associate blkcg to current kthread
1160 * @css: the cgroup info
1161 *
1162 * Current thread must be a kthread. The thread is running jobs on behalf of
 1163 * other threads. In some cases, we expect the jobs to attach the cgroup info
 1164 * of the original threads instead of that of the current thread. This function
 1165 * stores the original thread's cgroup info in the current kthread context for
 1166 * later retrieval.
1167 */
1168void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1169{
1170	struct kthread *kthread;
1171
1172	if (!(current->flags & PF_KTHREAD))
1173		return;
1174	kthread = to_kthread(current);
1175	if (!kthread)
1176		return;
1177
1178	if (kthread->blkcg_css) {
1179		css_put(kthread->blkcg_css);
1180		kthread->blkcg_css = NULL;
1181	}
1182	if (css) {
1183		css_get(css);
1184		kthread->blkcg_css = css;
1185	}
1186}
1187EXPORT_SYMBOL(kthread_associate_blkcg);
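
For instance, the loop driver's helper threads use this so that I/O they submit is charged to the cgroup of the thread that originated it. A rough, hypothetical sketch:

#include <linux/bio.h>
#include <linux/kthread.h>

static void my_submit_on_behalf(struct cgroup_subsys_state *origin_css,
				struct bio *bio)
{
	kthread_associate_blkcg(origin_css);	/* charge I/O to the originator */
	submit_bio(bio);
	kthread_associate_blkcg(NULL);		/* drop the association again */
}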
1188
1189/**
1190 * kthread_blkcg - get associated blkcg css of current kthread
1191 *
1192 * Current thread must be a kthread.
1193 */
1194struct cgroup_subsys_state *kthread_blkcg(void)
1195{
1196	struct kthread *kthread;
1197
1198	if (current->flags & PF_KTHREAD) {
1199		kthread = to_kthread(current);
1200		if (kthread)
1201			return kthread->blkcg_css;
1202	}
1203	return NULL;
1204}
1205EXPORT_SYMBOL(kthread_blkcg);
1206#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Kernel thread helper functions.
   3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   4 *   Copyright (C) 2009 Red Hat, Inc.
   5 *
   6 * Creation is done via kthreadd, so that we get a clean environment
   7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   8 * etc.).
   9 */
  10#include <uapi/linux/sched/types.h>
  11#include <linux/mm.h>
  12#include <linux/mmu_context.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/task.h>
  16#include <linux/kthread.h>
  17#include <linux/completion.h>
  18#include <linux/err.h>
  19#include <linux/cgroup.h>
  20#include <linux/cpuset.h>
  21#include <linux/unistd.h>
  22#include <linux/file.h>
  23#include <linux/export.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/freezer.h>
  27#include <linux/ptrace.h>
  28#include <linux/uaccess.h>
  29#include <linux/numa.h>
  30#include <linux/sched/isolation.h>
  31#include <trace/events/sched.h>
  32
  33
  34static DEFINE_SPINLOCK(kthread_create_lock);
  35static LIST_HEAD(kthread_create_list);
  36struct task_struct *kthreadd_task;
  37
  38struct kthread_create_info
  39{
  40	/* Information passed to kthread() from kthreadd. */
  41	char *full_name;
  42	int (*threadfn)(void *data);
  43	void *data;
  44	int node;
  45
  46	/* Result passed back to kthread_create() from kthreadd. */
  47	struct task_struct *result;
  48	struct completion *done;
  49
  50	struct list_head list;
  51};
  52
  53struct kthread {
  54	unsigned long flags;
  55	unsigned int cpu;
  56	int result;
  57	int (*threadfn)(void *);
  58	void *data;
  59	struct completion parked;
  60	struct completion exited;
  61#ifdef CONFIG_BLK_CGROUP
  62	struct cgroup_subsys_state *blkcg_css;
  63#endif
  64	/* To store the full name if task comm is truncated. */
  65	char *full_name;
  66};
  67
  68enum KTHREAD_BITS {
  69	KTHREAD_IS_PER_CPU = 0,
  70	KTHREAD_SHOULD_STOP,
  71	KTHREAD_SHOULD_PARK,
  72};
  73
  74static inline struct kthread *to_kthread(struct task_struct *k)
  75{
  76	WARN_ON(!(k->flags & PF_KTHREAD));
  77	return k->worker_private;
  78}
  79
  80/*
  81 * Variant of to_kthread() that doesn't assume @p is a kthread.
  82 *
  83 * Per construction; when:
  84 *
  85 *   (p->flags & PF_KTHREAD) && p->worker_private
  86 *
  87 * the task is both a kthread and struct kthread is persistent. However
   88 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
  89 * begin_new_exec()).
  90 */
  91static inline struct kthread *__to_kthread(struct task_struct *p)
  92{
  93	void *kthread = p->worker_private;
  94	if (kthread && !(p->flags & PF_KTHREAD))
  95		kthread = NULL;
  96	return kthread;
  97}
  98
  99void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 100{
 101	struct kthread *kthread = to_kthread(tsk);
 102
 103	if (!kthread || !kthread->full_name) {
 104		strscpy(buf, tsk->comm, buf_size);
 105		return;
 106	}
 107
 108	strscpy_pad(buf, kthread->full_name, buf_size);
 109}
 110
 111bool set_kthread_struct(struct task_struct *p)
 112{
 113	struct kthread *kthread;
 114
 115	if (WARN_ON_ONCE(to_kthread(p)))
 116		return false;
 117
 118	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
 119	if (!kthread)
 120		return false;
 121
 122	init_completion(&kthread->exited);
 123	init_completion(&kthread->parked);
 124	p->vfork_done = &kthread->exited;
 125
 126	p->worker_private = kthread;
 127	return true;
 128}
 129
 130void free_kthread_struct(struct task_struct *k)
 131{
 132	struct kthread *kthread;
 133
 134	/*
 135	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
 136	 */
 137	kthread = to_kthread(k);
 138	if (!kthread)
 139		return;
 140
 141#ifdef CONFIG_BLK_CGROUP
 142	WARN_ON_ONCE(kthread->blkcg_css);
 143#endif
 144	k->worker_private = NULL;
 145	kfree(kthread->full_name);
 146	kfree(kthread);
 147}
 148
 149/**
 150 * kthread_should_stop - should this kthread return now?
 151 *
 152 * When someone calls kthread_stop() on your kthread, it will be woken
 153 * and this will return true.  You should then return, and your return
 154 * value will be passed through to kthread_stop().
 155 */
 156bool kthread_should_stop(void)
 157{
 158	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 159}
 160EXPORT_SYMBOL(kthread_should_stop);
 161
 162static bool __kthread_should_park(struct task_struct *k)
 163{
 164	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
 165}
 166
 167/**
 168 * kthread_should_park - should this kthread park now?
 169 *
 170 * When someone calls kthread_park() on your kthread, it will be woken
 171 * and this will return true.  You should then do the necessary
  172 * cleanup and call kthread_parkme().
 173 *
 174 * Similar to kthread_should_stop(), but this keeps the thread alive
 175 * and in a park position. kthread_unpark() "restarts" the thread and
 176 * calls the thread function again.
 177 */
 178bool kthread_should_park(void)
 179{
 180	return __kthread_should_park(current);
 181}
 182EXPORT_SYMBOL_GPL(kthread_should_park);
 183
 184bool kthread_should_stop_or_park(void)
 185{
 186	struct kthread *kthread = __to_kthread(current);
 187
 188	if (!kthread)
 189		return false;
 190
 191	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
 192}
 193
 194/**
 195 * kthread_freezable_should_stop - should this freezable kthread return now?
 196 * @was_frozen: optional out parameter, indicates whether %current was frozen
 197 *
 198 * kthread_should_stop() for freezable kthreads, which will enter
 199 * refrigerator if necessary.  This function is safe from kthread_stop() /
 200 * freezer deadlock and freezable kthreads should use this function instead
 201 * of calling try_to_freeze() directly.
 202 */
 203bool kthread_freezable_should_stop(bool *was_frozen)
 204{
 205	bool frozen = false;
 206
 207	might_sleep();
 208
 209	if (unlikely(freezing(current)))
 210		frozen = __refrigerator(true);
 211
 212	if (was_frozen)
 213		*was_frozen = frozen;
 214
 215	return kthread_should_stop();
 216}
 217EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
 218
 219/**
 220 * kthread_func - return the function specified on kthread creation
 221 * @task: kthread task in question
 222 *
 223 * Returns NULL if the task is not a kthread.
 224 */
 225void *kthread_func(struct task_struct *task)
 226{
 227	struct kthread *kthread = __to_kthread(task);
 228	if (kthread)
 229		return kthread->threadfn;
 230	return NULL;
 231}
 232EXPORT_SYMBOL_GPL(kthread_func);
 233
 234/**
 235 * kthread_data - return data value specified on kthread creation
 236 * @task: kthread task in question
 237 *
 238 * Return the data value specified when kthread @task was created.
 239 * The caller is responsible for ensuring the validity of @task when
 240 * calling this function.
 241 */
 242void *kthread_data(struct task_struct *task)
 243{
 244	return to_kthread(task)->data;
 245}
 246EXPORT_SYMBOL_GPL(kthread_data);
 247
 248/**
 249 * kthread_probe_data - speculative version of kthread_data()
 250 * @task: possible kthread task in question
 251 *
 252 * @task could be a kthread task.  Return the data value specified when it
 253 * was created if accessible.  If @task isn't a kthread task or its data is
 254 * inaccessible for any reason, %NULL is returned.  This function requires
 255 * that @task itself is safe to dereference.
 256 */
 257void *kthread_probe_data(struct task_struct *task)
 258{
 259	struct kthread *kthread = __to_kthread(task);
 260	void *data = NULL;
 261
 262	if (kthread)
 263		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 264	return data;
 265}
 266
 267static void __kthread_parkme(struct kthread *self)
 268{
 269	for (;;) {
 270		/*
 271		 * TASK_PARKED is a special state; we must serialize against
 272		 * possible pending wakeups to avoid store-store collisions on
 273		 * task->state.
 274		 *
 275		 * Such a collision might possibly result in the task state
  276 * changing from TASK_PARKED and us failing the
 277		 * wait_task_inactive() in kthread_park().
 278		 */
 279		set_special_state(TASK_PARKED);
 280		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 281			break;
 282
 283		/*
 284		 * Thread is going to call schedule(), do not preempt it,
 285		 * or the caller of kthread_park() may spend more time in
 286		 * wait_task_inactive().
 287		 */
 288		preempt_disable();
 289		complete(&self->parked);
 290		schedule_preempt_disabled();
 291		preempt_enable();
 292	}
 293	__set_current_state(TASK_RUNNING);
 294}
 295
 296void kthread_parkme(void)
 297{
 298	__kthread_parkme(to_kthread(current));
 299}
 300EXPORT_SYMBOL_GPL(kthread_parkme);
 301
 302/**
  303 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 304 * @result: The integer value to return to kthread_stop().
 305 *
 306 * While kthread_exit can be called directly, it exists so that
 307 * functions which do some additional work in non-modular code such as
 308 * module_put_and_kthread_exit can be implemented.
 309 *
 310 * Does not return.
 311 */
 312void __noreturn kthread_exit(long result)
 313{
 314	struct kthread *kthread = to_kthread(current);
 315	kthread->result = result;
 316	do_exit(0);
 317}
 318EXPORT_SYMBOL(kthread_exit);
 319
 320/**
 321 * kthread_complete_and_exit - Exit the current kthread.
 322 * @comp: Completion to complete
 323 * @code: The integer value to return to kthread_stop().
 324 *
 325 * If present, complete @comp and then return code to kthread_stop().
 326 *
 327 * A kernel thread whose module may be removed after the completion of
 328 * @comp can use this function to exit safely.
 329 *
 330 * Does not return.
 331 */
 332void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
 333{
 334	if (comp)
 335		complete(comp);
 336
 337	kthread_exit(code);
 338}
 339EXPORT_SYMBOL(kthread_complete_and_exit);
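
A module-owned thread that must not return into module code after signalling that it is done can use this helper; module_put_and_kthread_exit() builds on it. A hypothetical sketch:

#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(my_done);

static int my_module_thread(void *unused)
{
	/* ... do the work ... */
	kthread_complete_and_exit(&my_done, 0);	/* completes my_done, never returns */
}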
 340
 341static int kthread(void *_create)
 342{
 343	static const struct sched_param param = { .sched_priority = 0 };
 344	/* Copy data: it's on kthread's stack */
 345	struct kthread_create_info *create = _create;
 346	int (*threadfn)(void *data) = create->threadfn;
 347	void *data = create->data;
 348	struct completion *done;
 349	struct kthread *self;
 350	int ret;
 351
 352	self = to_kthread(current);
 353
 354	/* Release the structure when caller killed by a fatal signal. */
 355	done = xchg(&create->done, NULL);
 356	if (!done) {
 357		kfree(create->full_name);
 358		kfree(create);
 359		kthread_exit(-EINTR);
 360	}
 361
 362	self->full_name = create->full_name;
 363	self->threadfn = threadfn;
 364	self->data = data;
 365
 366	/*
 367	 * The new thread inherited kthreadd's priority and CPU mask. Reset
 368	 * back to default in case they have been changed.
 369	 */
 370	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
 371	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
 372
 373	/* OK, tell user we're spawned, wait for stop or wakeup */
 374	__set_current_state(TASK_UNINTERRUPTIBLE);
 375	create->result = current;
 376	/*
 377	 * Thread is going to call schedule(), do not preempt it,
 378	 * or the creator may spend more time in wait_task_inactive().
 379	 */
 380	preempt_disable();
 381	complete(done);
 382	schedule_preempt_disabled();
 383	preempt_enable();
 384
 385	ret = -EINTR;
 386	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 387		cgroup_kthread_ready();
 388		__kthread_parkme(self);
 389		ret = threadfn(data);
 390	}
 391	kthread_exit(ret);
 392}
 393
  394/* called from kernel_clone() to get node information for the task about to be created */
 395int tsk_fork_get_node(struct task_struct *tsk)
 396{
 397#ifdef CONFIG_NUMA
 398	if (tsk == kthreadd_task)
 399		return tsk->pref_node_fork;
 400#endif
 401	return NUMA_NO_NODE;
 402}
 403
 404static void create_kthread(struct kthread_create_info *create)
 405{
 406	int pid;
 407
 408#ifdef CONFIG_NUMA
 409	current->pref_node_fork = create->node;
 410#endif
 411	/* We want our own signal handler (we take no signals by default). */
 412	pid = kernel_thread(kthread, create, create->full_name,
 413			    CLONE_FS | CLONE_FILES | SIGCHLD);
 414	if (pid < 0) {
 415		/* Release the structure when caller killed by a fatal signal. */
 416		struct completion *done = xchg(&create->done, NULL);
 417
 418		kfree(create->full_name);
 419		if (!done) {
 420			kfree(create);
 421			return;
 422		}
 423		create->result = ERR_PTR(pid);
 424		complete(done);
 425	}
 426}
 427
 428static __printf(4, 0)
 429struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 430						    void *data, int node,
 431						    const char namefmt[],
 432						    va_list args)
 433{
 434	DECLARE_COMPLETION_ONSTACK(done);
 435	struct task_struct *task;
 436	struct kthread_create_info *create = kmalloc(sizeof(*create),
 437						     GFP_KERNEL);
 438
 439	if (!create)
 440		return ERR_PTR(-ENOMEM);
 441	create->threadfn = threadfn;
 442	create->data = data;
 443	create->node = node;
 444	create->done = &done;
 445	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
 446	if (!create->full_name) {
 447		task = ERR_PTR(-ENOMEM);
 448		goto free_create;
 449	}
 450
 451	spin_lock(&kthread_create_lock);
 452	list_add_tail(&create->list, &kthread_create_list);
 453	spin_unlock(&kthread_create_lock);
 454
 455	wake_up_process(kthreadd_task);
 456	/*
 457	 * Wait for completion in killable state, for I might be chosen by
 458	 * the OOM killer while kthreadd is trying to allocate memory for
 459	 * new kernel thread.
 460	 */
 461	if (unlikely(wait_for_completion_killable(&done))) {
 462		/*
 463		 * If I was killed by a fatal signal before kthreadd (or new
 464		 * kernel thread) calls complete(), leave the cleanup of this
 465		 * structure to that thread.
 466		 */
 467		if (xchg(&create->done, NULL))
 468			return ERR_PTR(-EINTR);
 469		/*
 470		 * kthreadd (or new kernel thread) will call complete()
 471		 * shortly.
 472		 */
 473		wait_for_completion(&done);
 474	}
 475	task = create->result;
 476free_create:
 477	kfree(create);
 478	return task;
 479}
 480
 481/**
 482 * kthread_create_on_node - create a kthread.
 483 * @threadfn: the function to run until signal_pending(current).
 484 * @data: data ptr for @threadfn.
 485 * @node: task and thread structures for the thread are allocated on this node
 486 * @namefmt: printf-style name for the thread.
 487 *
 488 * Description: This helper function creates and names a kernel
 489 * thread.  The thread will be stopped: use wake_up_process() to start
 490 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 491 * is affine to all CPUs.
 492 *
  493 * If the thread is going to be bound to a particular cpu, give its node
 494 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 495 * When woken, the thread will run @threadfn() with @data as its
 496 * argument. @threadfn() can either return directly if it is a
 497 * standalone thread for which no one will call kthread_stop(), or
 498 * return when 'kthread_should_stop()' is true (which means
 499 * kthread_stop() has been called).  The return value should be zero
 500 * or a negative error number; it will be passed to kthread_stop().
 501 *
 502 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 503 */
 504struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 505					   void *data, int node,
 506					   const char namefmt[],
 507					   ...)
 508{
 509	struct task_struct *task;
 510	va_list args;
 511
 512	va_start(args, namefmt);
 513	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 514	va_end(args);
 515
 516	return task;
 517}
 518EXPORT_SYMBOL(kthread_create_on_node);
 519
 520static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
 521{
 522	unsigned long flags;
 523
 524	if (!wait_task_inactive(p, state)) {
 525		WARN_ON(1);
 526		return;
 527	}
 528
 529	/* It's safe because the task is inactive. */
 530	raw_spin_lock_irqsave(&p->pi_lock, flags);
 531	do_set_cpus_allowed(p, mask);
 532	p->flags |= PF_NO_SETAFFINITY;
 533	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 534}
 535
 536static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
 537{
 538	__kthread_bind_mask(p, cpumask_of(cpu), state);
 539}
 540
 541void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 542{
 543	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 544}
 545
 546/**
 547 * kthread_bind - bind a just-created kthread to a cpu.
 548 * @p: thread created by kthread_create().
 549 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 550 *
 551 * Description: This function is equivalent to set_cpus_allowed(),
 552 * except that @cpu doesn't need to be online, and the thread must be
 553 * stopped (i.e., just returned from kthread_create()).
 554 */
 555void kthread_bind(struct task_struct *p, unsigned int cpu)
 556{
 557	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 558}
 559EXPORT_SYMBOL(kthread_bind);
 560
 561/**
 562 * kthread_create_on_cpu - Create a cpu bound kthread
 563 * @threadfn: the function to run until signal_pending(current).
 564 * @data: data ptr for @threadfn.
 565 * @cpu: The cpu on which the thread should be bound,
 566 * @namefmt: printf-style name for the thread. Format is restricted
 567 *	     to "name.*%u". Code fills in cpu number.
 568 *
  569 * Description: This helper function creates and names a kernel thread bound to @cpu.
  570 */
 571struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 572					  void *data, unsigned int cpu,
 573					  const char *namefmt)
 574{
 575	struct task_struct *p;
 576
 577	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 578				   cpu);
 579	if (IS_ERR(p))
 580		return p;
 581	kthread_bind(p, cpu);
  582	/* CPU hotplug needs to bind it once again when unparking the thread. */
 583	to_kthread(p)->cpu = cpu;
 584	return p;
 585}
 586EXPORT_SYMBOL(kthread_create_on_cpu);
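/*
 * A minimal sketch (hypothetical example_fn and example_data): the name
 * format must end in "%u", which this function fills with the CPU number.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_cpu(example_fn, example_data, cpu, "example/%u");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 */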
 587
 588void kthread_set_per_cpu(struct task_struct *k, int cpu)
 589{
 590	struct kthread *kthread = to_kthread(k);
 591	if (!kthread)
 592		return;
 593
 594	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
 595
 596	if (cpu < 0) {
 597		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 598		return;
 599	}
 600
 601	kthread->cpu = cpu;
 602	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 603}
 604
 605bool kthread_is_per_cpu(struct task_struct *p)
 606{
 607	struct kthread *kthread = __to_kthread(p);
 608	if (!kthread)
 609		return false;
 610
 611	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 612}
 613
 614/**
 615 * kthread_unpark - unpark a thread created by kthread_create().
 616 * @k:		thread created by kthread_create().
 617 *
 618 * Sets kthread_should_park() for @k to return false, wakes it, and
  619 * waits for it to return. If the thread is marked percpu then it is
  620 * bound to the cpu again.
 621 */
 622void kthread_unpark(struct task_struct *k)
 623{
 624	struct kthread *kthread = to_kthread(k);
 625
 626	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
 627		return;
 628	/*
 629	 * Newly created kthread was parked when the CPU was offline.
 630	 * The binding was lost and we need to set it again.
 631	 */
 632	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 633		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 634
 635	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 636	/*
 637	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 638	 */
 639	wake_up_state(k, TASK_PARKED);
 640}
 641EXPORT_SYMBOL_GPL(kthread_unpark);
 642
 643/**
 644 * kthread_park - park a thread created by kthread_create().
 645 * @k: thread created by kthread_create().
 646 *
 647 * Sets kthread_should_park() for @k to return true, wakes it, and
 648 * waits for it to return. This can also be called after kthread_create()
 649 * instead of calling wake_up_process(): the thread will park without
 650 * calling threadfn().
 651 *
  652 * Returns 0 if the thread is parked, -EBUSY if it was already parked,
  653 * and -ENOSYS if the thread is exiting. If called by the kthread itself, just the park bit is set.
 654 */
 655int kthread_park(struct task_struct *k)
 656{
 657	struct kthread *kthread = to_kthread(k);
 658
 659	if (WARN_ON(k->flags & PF_EXITING))
 660		return -ENOSYS;
 661
 662	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
 663		return -EBUSY;
 664
 665	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 666	if (k != current) {
 667		wake_up_process(k);
 668		/*
 669		 * Wait for __kthread_parkme() to complete(), this means we
 670		 * _will_ have TASK_PARKED and are about to call schedule().
 671		 */
 672		wait_for_completion(&kthread->parked);
 673		/*
 674		 * Now wait for that schedule() to complete and the task to
 675		 * get scheduled out.
 676		 */
 677		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
 678	}
 679
 680	return 0;
 681}
 682EXPORT_SYMBOL_GPL(kthread_park);
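/*
 * A minimal park/unpark sketch (hypothetical example_fn and task pointer t):
 * the controlling side calls kthread_park() and kthread_unpark(), while the
 * thread itself checks kthread_should_park() and calls kthread_parkme().
 *
 *	static int example_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			...
 *		}
 *		return 0;
 *	}
 *
 *	kthread_park(t);
 *	...
 *	kthread_unpark(t);
 */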
 683
 684/**
 685 * kthread_stop - stop a thread created by kthread_create().
 686 * @k: thread created by kthread_create().
 687 *
 688 * Sets kthread_should_stop() for @k to return true, wakes it, and
 689 * waits for it to exit. This can also be called after kthread_create()
 690 * instead of calling wake_up_process(): the thread will exit without
 691 * calling threadfn().
 692 *
 693 * If threadfn() may call kthread_exit() itself, the caller must ensure
 694 * task_struct can't go away.
 695 *
 696 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 697 * was never called.
 698 */
 699int kthread_stop(struct task_struct *k)
 700{
 701	struct kthread *kthread;
 702	int ret;
 703
 704	trace_sched_kthread_stop(k);
 705
 706	get_task_struct(k);
 707	kthread = to_kthread(k);
 708	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 709	kthread_unpark(k);
 710	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
 711	wake_up_process(k);
 712	wait_for_completion(&kthread->exited);
 713	ret = kthread->result;
 714	put_task_struct(k);
 715
 716	trace_sched_kthread_stop_ret(ret);
 717	return ret;
 718}
 719EXPORT_SYMBOL(kthread_stop);
 720
 721/**
 722 * kthread_stop_put - stop a thread and put its task struct
 723 * @k: thread created by kthread_create().
 724 *
  725 * Stops a thread created by kthread_create() and puts its task_struct.
  726 * Only use when holding an extra task_struct reference obtained by
 727 * calling get_task_struct().
 728 */
 729int kthread_stop_put(struct task_struct *k)
 730{
 731	int ret;
 732
 733	ret = kthread_stop(k);
 734	put_task_struct(k);
 735	return ret;
 736}
 737EXPORT_SYMBOL(kthread_stop_put);
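/*
 * A minimal sketch of the intended pattern (t is a previously created
 * kthread): the caller takes an extra task_struct reference so the pointer
 * stays valid until the stop, and drops that reference together with it.
 *
 *	get_task_struct(t);
 *	...
 *	ret = kthread_stop_put(t);
 */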
 738
 739int kthreadd(void *unused)
 740{
 741	struct task_struct *tsk = current;
 742
 743	/* Setup a clean context for our children to inherit. */
 744	set_task_comm(tsk, "kthreadd");
 745	ignore_signals(tsk);
 746	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
 747	set_mems_allowed(node_states[N_MEMORY]);
 748
 749	current->flags |= PF_NOFREEZE;
 750	cgroup_init_kthreadd();
 751
 752	for (;;) {
 753		set_current_state(TASK_INTERRUPTIBLE);
 754		if (list_empty(&kthread_create_list))
 755			schedule();
 756		__set_current_state(TASK_RUNNING);
 757
 758		spin_lock(&kthread_create_lock);
 759		while (!list_empty(&kthread_create_list)) {
 760			struct kthread_create_info *create;
 761
 762			create = list_entry(kthread_create_list.next,
 763					    struct kthread_create_info, list);
 764			list_del_init(&create->list);
 765			spin_unlock(&kthread_create_lock);
 766
 767			create_kthread(create);
 768
 769			spin_lock(&kthread_create_lock);
 770		}
 771		spin_unlock(&kthread_create_lock);
 772	}
 773
 774	return 0;
 775}
 776
 777void __kthread_init_worker(struct kthread_worker *worker,
 778				const char *name,
 779				struct lock_class_key *key)
 780{
 781	memset(worker, 0, sizeof(struct kthread_worker));
 782	raw_spin_lock_init(&worker->lock);
 783	lockdep_set_class_and_name(&worker->lock, key, name);
 784	INIT_LIST_HEAD(&worker->work_list);
 785	INIT_LIST_HEAD(&worker->delayed_work_list);
 786}
 787EXPORT_SYMBOL_GPL(__kthread_init_worker);
 788
 789/**
 790 * kthread_worker_fn - kthread function to process kthread_worker
 791 * @worker_ptr: pointer to initialized kthread_worker
 792 *
 793 * This function implements the main cycle of kthread worker. It processes
 794 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 795 * is empty.
 796 *
  797 * The works must not hold any locks or leave preemption or interrupts disabled
  798 * when they finish. A safe point for freezing is defined after one work
  799 * finishes and before a new one is started.
  800 *
  801 * Also, a work must not be handled by more than one worker at the same time;
  802 * see also kthread_queue_work().
 803 */
 804int kthread_worker_fn(void *worker_ptr)
 805{
 806	struct kthread_worker *worker = worker_ptr;
 807	struct kthread_work *work;
 808
 809	/*
 810	 * FIXME: Update the check and remove the assignment when all kthread
 811	 * worker users are created using kthread_create_worker*() functions.
 812	 */
 813	WARN_ON(worker->task && worker->task != current);
 814	worker->task = current;
 815
 816	if (worker->flags & KTW_FREEZABLE)
 817		set_freezable();
 818
 819repeat:
 820	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 821
 822	if (kthread_should_stop()) {
 823		__set_current_state(TASK_RUNNING);
 824		raw_spin_lock_irq(&worker->lock);
 825		worker->task = NULL;
 826		raw_spin_unlock_irq(&worker->lock);
 827		return 0;
 828	}
 829
 830	work = NULL;
 831	raw_spin_lock_irq(&worker->lock);
 832	if (!list_empty(&worker->work_list)) {
 833		work = list_first_entry(&worker->work_list,
 834					struct kthread_work, node);
 835		list_del_init(&work->node);
 836	}
 837	worker->current_work = work;
 838	raw_spin_unlock_irq(&worker->lock);
 839
 840	if (work) {
 841		kthread_work_func_t func = work->func;
 842		__set_current_state(TASK_RUNNING);
 843		trace_sched_kthread_work_execute_start(work);
 844		work->func(work);
 845		/*
 846		 * Avoid dereferencing work after this point.  The trace
 847		 * event only cares about the address.
 848		 */
 849		trace_sched_kthread_work_execute_end(work, func);
 850	} else if (!freezing(current)) {
 851		schedule();
 852	} else {
 853		/*
  854		 * Handle the case where the current task remains
  855		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
  856		 * the current task to be TASK_RUNNING.
 857		 */
 858		__set_current_state(TASK_RUNNING);
 859	}
 860
 861	try_to_freeze();
 862	cond_resched();
 863	goto repeat;
 864}
 865EXPORT_SYMBOL_GPL(kthread_worker_fn);
 866
 867static __printf(3, 0) struct kthread_worker *
 868__kthread_create_worker(int cpu, unsigned int flags,
 869			const char namefmt[], va_list args)
 870{
 871	struct kthread_worker *worker;
 872	struct task_struct *task;
 873	int node = NUMA_NO_NODE;
 874
 875	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 876	if (!worker)
 877		return ERR_PTR(-ENOMEM);
 878
 879	kthread_init_worker(worker);
 880
 881	if (cpu >= 0)
 882		node = cpu_to_node(cpu);
 883
 884	task = __kthread_create_on_node(kthread_worker_fn, worker,
 885						node, namefmt, args);
 886	if (IS_ERR(task))
 887		goto fail_task;
 888
 889	if (cpu >= 0)
 890		kthread_bind(task, cpu);
 891
 892	worker->flags = flags;
 893	worker->task = task;
 894	wake_up_process(task);
 895	return worker;
 896
 897fail_task:
 898	kfree(worker);
 899	return ERR_CAST(task);
 900}
 901
 902/**
 903 * kthread_create_worker - create a kthread worker
 904 * @flags: flags modifying the default behavior of the worker
 905 * @namefmt: printf-style name for the kthread worker (task).
 906 *
 907 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 908 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 909 * when the caller was killed by a fatal signal.
 910 */
 911struct kthread_worker *
 912kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 913{
 914	struct kthread_worker *worker;
 915	va_list args;
 916
 917	va_start(args, namefmt);
 918	worker = __kthread_create_worker(-1, flags, namefmt, args);
 919	va_end(args);
 920
 921	return worker;
 922}
 923EXPORT_SYMBOL(kthread_create_worker);
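/*
 * A minimal kthread_worker sketch (hypothetical example_work_fn,
 * example_work and example_worker): create a worker, queue a statically
 * initialized work item, flush it and destroy the worker.
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *	static DEFINE_KTHREAD_WORK(example_work, example_work_fn);
 *
 *	struct kthread_worker *example_worker;
 *
 *	example_worker = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(example_worker))
 *		return PTR_ERR(example_worker);
 *
 *	kthread_queue_work(example_worker, &example_work);
 *	kthread_flush_work(&example_work);
 *	kthread_destroy_worker(example_worker);
 */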
 924
 925/**
 926 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 927 *	to a given CPU and the associated NUMA node.
 928 * @cpu: CPU number
 929 * @flags: flags modifying the default behavior of the worker
 930 * @namefmt: printf-style name for the kthread worker (task).
 931 *
 932 * Use a valid CPU number if you want to bind the kthread worker
 933 * to the given CPU and the associated NUMA node.
 934 *
 935 * A good practice is to add the cpu number also into the worker name.
  936 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 937 *
 938 * CPU hotplug:
 939 * The kthread worker API is simple and generic. It just provides a way
 940 * to create, use, and destroy workers.
 941 *
 942 * It is up to the API user how to handle CPU hotplug. They have to decide
 943 * how to handle pending work items, prevent queuing new ones, and
 944 * restore the functionality when the CPU goes off and on. There are a
 945 * few catches:
 946 *
  947 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 948 *
  949 *    - The worker might not exist if the CPU was offline when the user
 950 *      created the workers.
 951 *
 952 * Good practice is to implement two CPU hotplug callbacks and to
 953 * destroy/create the worker when the CPU goes down/up.
 954 *
 955 * Return:
 956 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 957 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 958 * when the caller was killed by a fatal signal.
 959 */
 960struct kthread_worker *
 961kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 962			     const char namefmt[], ...)
 963{
 964	struct kthread_worker *worker;
 965	va_list args;
 966
 967	va_start(args, namefmt);
 968	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 969	va_end(args);
 970
 971	return worker;
 972}
 973EXPORT_SYMBOL(kthread_create_worker_on_cpu);
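/*
 * A sketch of the hotplug practice described above, using the generic
 * hotplug state machine. The example_workers array and the callbacks are
 * hypothetical and error handling is trimmed.
 *
 *	static struct kthread_worker *example_workers[NR_CPUS];
 *
 *	static int example_cpu_online(unsigned int cpu)
 *	{
 *		struct kthread_worker *w;
 *
 *		w = kthread_create_worker_on_cpu(cpu, 0, "example/%u", cpu);
 *		if (IS_ERR(w))
 *			return PTR_ERR(w);
 *		example_workers[cpu] = w;
 *		return 0;
 *	}
 *
 *	static int example_cpu_offline(unsigned int cpu)
 *	{
 *		kthread_destroy_worker(example_workers[cpu]);
 *		example_workers[cpu] = NULL;
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *				example_cpu_online, example_cpu_offline);
 */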
 974
 975/*
  976 * Returns true when the work cannot be queued at the moment.
  977 * This happens when it is already pending in a worker list
  978 * or when it is being canceled.
 979 */
 980static inline bool queuing_blocked(struct kthread_worker *worker,
 981				   struct kthread_work *work)
 982{
 983	lockdep_assert_held(&worker->lock);
 984
 985	return !list_empty(&work->node) || work->canceling;
 986}
 987
 988static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 989					     struct kthread_work *work)
 990{
 991	lockdep_assert_held(&worker->lock);
 992	WARN_ON_ONCE(!list_empty(&work->node));
 993	/* Do not use a work with >1 worker, see kthread_queue_work() */
 994	WARN_ON_ONCE(work->worker && work->worker != worker);
 995}
 996
 997/* insert @work before @pos in @worker */
 998static void kthread_insert_work(struct kthread_worker *worker,
 999				struct kthread_work *work,
1000				struct list_head *pos)
1001{
1002	kthread_insert_work_sanity_check(worker, work);
1003
1004	trace_sched_kthread_work_queue_work(worker, work);
1005
1006	list_add_tail(&work->node, pos);
1007	work->worker = worker;
1008	if (!worker->current_work && likely(worker->task))
1009		wake_up_process(worker->task);
1010}
1011
1012/**
1013 * kthread_queue_work - queue a kthread_work
1014 * @worker: target kthread_worker
1015 * @work: kthread_work to queue
1016 *
 1017 * Queue @work on @worker for async execution.  @worker
 1018 * must have been created with kthread_create_worker().  Returns %true
1019 * if @work was successfully queued, %false if it was already pending.
1020 *
1021 * Reinitialize the work if it needs to be used by another worker.
1022 * For example, when the worker was stopped and started again.
1023 */
1024bool kthread_queue_work(struct kthread_worker *worker,
1025			struct kthread_work *work)
1026{
1027	bool ret = false;
1028	unsigned long flags;
1029
1030	raw_spin_lock_irqsave(&worker->lock, flags);
1031	if (!queuing_blocked(worker, work)) {
1032		kthread_insert_work(worker, work, &worker->work_list);
1033		ret = true;
1034	}
1035	raw_spin_unlock_irqrestore(&worker->lock, flags);
1036	return ret;
1037}
1038EXPORT_SYMBOL_GPL(kthread_queue_work);
1039
1040/**
1041 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1042 *	delayed work when the timer expires.
1043 * @t: pointer to the expired timer
1044 *
 1045 * The prototype of the function is defined by struct timer_list.
 1046 * It is called from an irq-safe timer with interrupts already disabled.
1047 */
1048void kthread_delayed_work_timer_fn(struct timer_list *t)
1049{
1050	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1051	struct kthread_work *work = &dwork->work;
1052	struct kthread_worker *worker = work->worker;
1053	unsigned long flags;
1054
1055	/*
1056	 * This might happen when a pending work is reinitialized.
 1057	 * It means that the work is being used in a wrong way.
1058	 */
1059	if (WARN_ON_ONCE(!worker))
1060		return;
1061
1062	raw_spin_lock_irqsave(&worker->lock, flags);
1063	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1064	WARN_ON_ONCE(work->worker != worker);
1065
1066	/* Move the work from worker->delayed_work_list. */
1067	WARN_ON_ONCE(list_empty(&work->node));
1068	list_del_init(&work->node);
1069	if (!work->canceling)
1070		kthread_insert_work(worker, work, &worker->work_list);
1071
1072	raw_spin_unlock_irqrestore(&worker->lock, flags);
1073}
1074EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1075
1076static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1077					 struct kthread_delayed_work *dwork,
1078					 unsigned long delay)
1079{
1080	struct timer_list *timer = &dwork->timer;
1081	struct kthread_work *work = &dwork->work;
1082
1083	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1084
1085	/*
1086	 * If @delay is 0, queue @dwork->work immediately.  This is for
1087	 * both optimization and correctness.  The earliest @timer can
1088	 * expire is on the closest next tick and delayed_work users depend
1089	 * on that there's no such delay when @delay is 0.
1090	 */
1091	if (!delay) {
1092		kthread_insert_work(worker, work, &worker->work_list);
1093		return;
1094	}
1095
1096	/* Be paranoid and try to detect possible races already now. */
1097	kthread_insert_work_sanity_check(worker, work);
1098
1099	list_add(&work->node, &worker->delayed_work_list);
1100	work->worker = worker;
1101	timer->expires = jiffies + delay;
1102	add_timer(timer);
1103}
1104
1105/**
1106 * kthread_queue_delayed_work - queue the associated kthread work
1107 *	after a delay.
1108 * @worker: target kthread_worker
1109 * @dwork: kthread_delayed_work to queue
1110 * @delay: number of jiffies to wait before queuing
1111 *
 1112 * If the work is not pending, this starts a timer that will queue
1113 * the work after the given @delay. If @delay is zero, it queues the
1114 * work immediately.
1115 *
 1116 * Return: %false if @work was already pending, which means that
 1117 * either its timer was running or the work was queued. It returns %true
1118 * otherwise.
1119 */
1120bool kthread_queue_delayed_work(struct kthread_worker *worker,
1121				struct kthread_delayed_work *dwork,
1122				unsigned long delay)
1123{
1124	struct kthread_work *work = &dwork->work;
1125	unsigned long flags;
1126	bool ret = false;
1127
1128	raw_spin_lock_irqsave(&worker->lock, flags);
1129
1130	if (!queuing_blocked(worker, work)) {
1131		__kthread_queue_delayed_work(worker, dwork, delay);
1132		ret = true;
1133	}
1134
1135	raw_spin_unlock_irqrestore(&worker->lock, flags);
1136	return ret;
1137}
1138EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
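/*
 * A minimal delayed-work sketch (hypothetical example_dwork_fn,
 * example_dwork and example_worker): initialize the delayed work, queue it
 * with a delay, and cancel it on teardown.
 *
 *	static void example_dwork_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *	static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_dwork_fn);
 *
 *	kthread_queue_delayed_work(example_worker, &example_dwork,
 *				   msecs_to_jiffies(100));
 *	...
 *	kthread_cancel_delayed_work_sync(&example_dwork);
 */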
1139
1140struct kthread_flush_work {
1141	struct kthread_work	work;
1142	struct completion	done;
1143};
1144
1145static void kthread_flush_work_fn(struct kthread_work *work)
1146{
1147	struct kthread_flush_work *fwork =
1148		container_of(work, struct kthread_flush_work, work);
1149	complete(&fwork->done);
1150}
1151
1152/**
1153 * kthread_flush_work - flush a kthread_work
1154 * @work: work to flush
1155 *
1156 * If @work is queued or executing, wait for it to finish execution.
1157 */
1158void kthread_flush_work(struct kthread_work *work)
1159{
1160	struct kthread_flush_work fwork = {
1161		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1162		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1163	};
1164	struct kthread_worker *worker;
1165	bool noop = false;
1166
1167	worker = work->worker;
1168	if (!worker)
1169		return;
1170
1171	raw_spin_lock_irq(&worker->lock);
1172	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1173	WARN_ON_ONCE(work->worker != worker);
1174
1175	if (!list_empty(&work->node))
1176		kthread_insert_work(worker, &fwork.work, work->node.next);
1177	else if (worker->current_work == work)
1178		kthread_insert_work(worker, &fwork.work,
1179				    worker->work_list.next);
1180	else
1181		noop = true;
1182
1183	raw_spin_unlock_irq(&worker->lock);
1184
1185	if (!noop)
1186		wait_for_completion(&fwork.done);
1187}
1188EXPORT_SYMBOL_GPL(kthread_flush_work);
1189
1190/*
 1191 * Make sure that the timer is neither set nor running and can
 1192 * no longer manipulate the work list_head.
1193 *
 1194 * The function is called under worker->lock. The lock is temporarily
 1195 * released but the timer can't be set again in the meantime.
1196 */
1197static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1198					      unsigned long *flags)
1199{
1200	struct kthread_delayed_work *dwork =
1201		container_of(work, struct kthread_delayed_work, work);
1202	struct kthread_worker *worker = work->worker;
1203
1204	/*
1205	 * del_timer_sync() must be called to make sure that the timer
 1206	 * callback is not running. The lock must be temporarily released
1207	 * to avoid a deadlock with the callback. In the meantime,
1208	 * any queuing is blocked by setting the canceling counter.
1209	 */
1210	work->canceling++;
1211	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1212	del_timer_sync(&dwork->timer);
1213	raw_spin_lock_irqsave(&worker->lock, *flags);
1214	work->canceling--;
1215}
1216
1217/*
1218 * This function removes the work from the worker queue.
1219 *
1220 * It is called under worker->lock. The caller must make sure that
1221 * the timer used by delayed work is not running, e.g. by calling
1222 * kthread_cancel_delayed_work_timer().
1223 *
1224 * The work might still be in use when this function finishes. See the
 1225 * current_work processed by the worker.
1226 *
1227 * Return: %true if @work was pending and successfully canceled,
1228 *	%false if @work was not pending
1229 */
1230static bool __kthread_cancel_work(struct kthread_work *work)
1231{
1232	/*
1233	 * Try to remove the work from a worker list. It might either
1234	 * be from worker->work_list or from worker->delayed_work_list.
1235	 */
1236	if (!list_empty(&work->node)) {
1237		list_del_init(&work->node);
1238		return true;
1239	}
1240
1241	return false;
1242}
1243
1244/**
1245 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1246 * @worker: kthread worker to use
1247 * @dwork: kthread delayed work to queue
1248 * @delay: number of jiffies to wait before queuing
1249 *
1250 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1251 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1252 * @work is guaranteed to be queued immediately.
1253 *
1254 * Return: %false if @dwork was idle and queued, %true otherwise.
1255 *
1256 * A special case is when the work is being canceled in parallel.
 1257 * It might be caused either by a real kthread_cancel_delayed_work_sync()
 1258 * or by another kthread_mod_delayed_work() call. We let the other command
 1259 * win and return %true here. The return value can be used for reference
 1260 * counting, and the number of queued works stays the same. Anyway, the caller
 1261 * is supposed to synchronize these operations in a reasonable way.
1262 *
1263 * This function is safe to call from any context including IRQ handler.
1264 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1265 * for details.
1266 */
1267bool kthread_mod_delayed_work(struct kthread_worker *worker,
1268			      struct kthread_delayed_work *dwork,
1269			      unsigned long delay)
1270{
1271	struct kthread_work *work = &dwork->work;
1272	unsigned long flags;
1273	int ret;
1274
1275	raw_spin_lock_irqsave(&worker->lock, flags);
1276
1277	/* Do not bother with canceling when never queued. */
1278	if (!work->worker) {
1279		ret = false;
1280		goto fast_queue;
1281	}
1282
1283	/* Work must not be used with >1 worker, see kthread_queue_work() */
1284	WARN_ON_ONCE(work->worker != worker);
1285
1286	/*
 1287	 * Temporarily cancel the work but do not fight with another command
1288	 * that is canceling the work as well.
1289	 *
1290	 * It is a bit tricky because of possible races with another
1291	 * mod_delayed_work() and cancel_delayed_work() callers.
1292	 *
1293	 * The timer must be canceled first because worker->lock is released
1294	 * when doing so. But the work can be removed from the queue (list)
1295	 * only when it can be queued again so that the return value can
1296	 * be used for reference counting.
1297	 */
1298	kthread_cancel_delayed_work_timer(work, &flags);
1299	if (work->canceling) {
1300		/* The number of works in the queue does not change. */
1301		ret = true;
1302		goto out;
1303	}
1304	ret = __kthread_cancel_work(work);
1305
1306fast_queue:
1307	__kthread_queue_delayed_work(worker, dwork, delay);
1308out:
1309	raw_spin_unlock_irqrestore(&worker->lock, flags);
1310	return ret;
1311}
1312EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
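/*
 * A minimal sketch of a typical use (hypothetical example_worker and
 * example_timeout_dwork): each call pushes an inactivity timeout further
 * into the future, queuing the work if it was not pending yet.
 *
 *	kthread_mod_delayed_work(example_worker, &example_timeout_dwork,
 *				 msecs_to_jiffies(1000));
 */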
1313
1314static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1315{
1316	struct kthread_worker *worker = work->worker;
1317	unsigned long flags;
1318	int ret = false;
1319
1320	if (!worker)
1321		goto out;
1322
1323	raw_spin_lock_irqsave(&worker->lock, flags);
1324	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1325	WARN_ON_ONCE(work->worker != worker);
1326
1327	if (is_dwork)
1328		kthread_cancel_delayed_work_timer(work, &flags);
1329
1330	ret = __kthread_cancel_work(work);
1331
1332	if (worker->current_work != work)
1333		goto out_fast;
1334
1335	/*
1336	 * The work is in progress and we need to wait with the lock released.
1337	 * In the meantime, block any queuing by setting the canceling counter.
1338	 */
1339	work->canceling++;
1340	raw_spin_unlock_irqrestore(&worker->lock, flags);
1341	kthread_flush_work(work);
1342	raw_spin_lock_irqsave(&worker->lock, flags);
1343	work->canceling--;
1344
1345out_fast:
1346	raw_spin_unlock_irqrestore(&worker->lock, flags);
1347out:
1348	return ret;
1349}
1350
1351/**
1352 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1353 * @work: the kthread work to cancel
1354 *
1355 * Cancel @work and wait for its execution to finish.  This function
1356 * can be used even if the work re-queues itself. On return from this
1357 * function, @work is guaranteed to be not pending or executing on any CPU.
1358 *
1359 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1360 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1361 *
1362 * The caller must ensure that the worker on which @work was last
1363 * queued can't be destroyed before this function returns.
1364 *
1365 * Return: %true if @work was pending, %false otherwise.
1366 */
1367bool kthread_cancel_work_sync(struct kthread_work *work)
1368{
1369	return __kthread_cancel_work_sync(work, false);
1370}
1371EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1372
1373/**
1374 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1375 *	wait for it to finish.
1376 * @dwork: the kthread delayed work to cancel
1377 *
1378 * This is kthread_cancel_work_sync() for delayed works.
1379 *
1380 * Return: %true if @dwork was pending, %false otherwise.
1381 */
1382bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1383{
1384	return __kthread_cancel_work_sync(&dwork->work, true);
1385}
1386EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1387
1388/**
1389 * kthread_flush_worker - flush all current works on a kthread_worker
1390 * @worker: worker to flush
1391 *
1392 * Wait until all currently executing or pending works on @worker are
1393 * finished.
1394 */
1395void kthread_flush_worker(struct kthread_worker *worker)
1396{
1397	struct kthread_flush_work fwork = {
1398		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1399		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1400	};
1401
1402	kthread_queue_work(worker, &fwork.work);
1403	wait_for_completion(&fwork.done);
1404}
1405EXPORT_SYMBOL_GPL(kthread_flush_worker);
1406
1407/**
1408 * kthread_destroy_worker - destroy a kthread worker
1409 * @worker: worker to be destroyed
1410 *
1411 * Flush and destroy @worker.  The simple flush is enough because the kthread
1412 * worker API is used only in trivial scenarios.  There are no multi-step state
1413 * machines needed.
1414 *
1415 * Note that this function is not responsible for handling delayed work, so
 1416 * the caller is responsible for queuing or canceling all delayed work items
 1417 * before invoking this function.
1418 */
1419void kthread_destroy_worker(struct kthread_worker *worker)
1420{
1421	struct task_struct *task;
1422
1423	task = worker->task;
1424	if (WARN_ON(!task))
1425		return;
1426
1427	kthread_flush_worker(worker);
1428	kthread_stop(task);
1429	WARN_ON(!list_empty(&worker->delayed_work_list));
1430	WARN_ON(!list_empty(&worker->work_list));
1431	kfree(worker);
1432}
1433EXPORT_SYMBOL(kthread_destroy_worker);
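/*
 * A minimal teardown sketch following the note above (hypothetical
 * example_dwork and example_worker): delayed work is canceled before the
 * worker is destroyed.
 *
 *	kthread_cancel_delayed_work_sync(&example_dwork);
 *	kthread_destroy_worker(example_worker);
 */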
1434
1435/**
1436 * kthread_use_mm - make the calling kthread operate on an address space
1437 * @mm: address space to operate on
1438 */
1439void kthread_use_mm(struct mm_struct *mm)
1440{
1441	struct mm_struct *active_mm;
1442	struct task_struct *tsk = current;
1443
1444	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1445	WARN_ON_ONCE(tsk->mm);
1446
1447	/*
1448	 * It is possible for mm to be the same as tsk->active_mm, but
1449	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1450	 * because these references are not equivalent.
1451	 */
1452	mmgrab(mm);
1453
1454	task_lock(tsk);
1455	/* Hold off tlb flush IPIs while switching mm's */
1456	local_irq_disable();
1457	active_mm = tsk->active_mm;
1458	tsk->active_mm = mm;
1459	tsk->mm = mm;
1460	membarrier_update_current_mm(mm);
1461	switch_mm_irqs_off(active_mm, mm, tsk);
1462	local_irq_enable();
1463	task_unlock(tsk);
1464#ifdef finish_arch_post_lock_switch
1465	finish_arch_post_lock_switch();
1466#endif
1467
1468	/*
1469	 * When a kthread starts operating on an address space, the loop
1470	 * in membarrier_{private,global}_expedited() may not observe
 1471	 * the newly set tsk->mm and thus not issue an IPI. Membarrier requires a
1472	 * memory barrier after storing to tsk->mm, before accessing
1473	 * user-space memory. A full memory barrier for membarrier
1474	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1475	 * mmdrop_lazy_tlb().
1476	 */
1477	mmdrop_lazy_tlb(active_mm);
1478}
1479EXPORT_SYMBOL_GPL(kthread_use_mm);
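/*
 * A minimal sketch of a kthread temporarily operating on another task's
 * address space. example_read_user() is hypothetical; tsk, uaddr, buf and
 * len are assumed to be supplied by the caller, and the function is assumed
 * to run in a kthread that currently has no mm.
 *
 *	static int example_read_user(struct task_struct *tsk,
 *				     void __user *uaddr, void *buf, size_t len)
 *	{
 *		struct mm_struct *mm = get_task_mm(tsk);
 *		int ret = 0;
 *
 *		if (!mm)
 *			return -ESRCH;
 *		kthread_use_mm(mm);
 *		if (copy_from_user(buf, uaddr, len))
 *			ret = -EFAULT;
 *		kthread_unuse_mm(mm);
 *		mmput(mm);
 *		return ret;
 *	}
 */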
1480
1481/**
1482 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1483 * @mm: address space to operate on
1484 */
1485void kthread_unuse_mm(struct mm_struct *mm)
1486{
1487	struct task_struct *tsk = current;
1488
1489	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1490	WARN_ON_ONCE(!tsk->mm);
1491
1492	task_lock(tsk);
1493	/*
1494	 * When a kthread stops operating on an address space, the loop
1495	 * in membarrier_{private,global}_expedited() may not observe
 1496	 * tsk->mm any longer and thus not issue an IPI. Membarrier requires a
1497	 * memory barrier after accessing user-space memory, before
1498	 * clearing tsk->mm.
1499	 */
1500	smp_mb__after_spinlock();
1501	local_irq_disable();
1502	tsk->mm = NULL;
1503	membarrier_update_current_mm(NULL);
1504	mmgrab_lazy_tlb(mm);
1505	/* active_mm is still 'mm' */
1506	enter_lazy_tlb(mm, tsk);
1507	local_irq_enable();
1508	task_unlock(tsk);
1509
1510	mmdrop(mm);
1511}
1512EXPORT_SYMBOL_GPL(kthread_unuse_mm);
1513
1514#ifdef CONFIG_BLK_CGROUP
1515/**
1516 * kthread_associate_blkcg - associate blkcg to current kthread
1517 * @css: the cgroup info
1518 *
 1519 * Current thread must be a kthread. The thread is running jobs on behalf of
 1520 * other threads. In some cases, we expect the jobs to attach the cgroup info of
 1521 * the original threads instead of that of the current thread. This function stores
 1522 * the original thread's cgroup info in the current kthread context for later
 1523 * retrieval.
1524 */
1525void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1526{
1527	struct kthread *kthread;
1528
1529	if (!(current->flags & PF_KTHREAD))
1530		return;
1531	kthread = to_kthread(current);
1532	if (!kthread)
1533		return;
1534
1535	if (kthread->blkcg_css) {
1536		css_put(kthread->blkcg_css);
1537		kthread->blkcg_css = NULL;
1538	}
1539	if (css) {
1540		css_get(css);
1541		kthread->blkcg_css = css;
1542	}
1543}
1544EXPORT_SYMBOL(kthread_associate_blkcg);
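/*
 * A minimal sketch (the css is assumed to have been obtained from the
 * submitting context; the helpers for that are not shown): attribute the IO
 * issued by this kthread to the submitter's blkcg, then drop the
 * association afterwards.
 *
 *	kthread_associate_blkcg(css);
 *	... submit bios on behalf of the original task ...
 *	kthread_associate_blkcg(NULL);
 */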
1545
1546/**
1547 * kthread_blkcg - get associated blkcg css of current kthread
1548 *
1549 * Current thread must be a kthread.
1550 */
1551struct cgroup_subsys_state *kthread_blkcg(void)
1552{
1553	struct kthread *kthread;
1554
1555	if (current->flags & PF_KTHREAD) {
1556		kthread = to_kthread(current);
1557		if (kthread)
1558			return kthread->blkcg_css;
1559	}
1560	return NULL;
1561}
1562#endif