v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Kernel thread helper functions.
   3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   4 *
   5 * Creation is done via kthreadd, so that we get a clean environment
   6 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   7 * etc.).
   8 */
   9#include <uapi/linux/sched/types.h>
  10#include <linux/sched.h>
  11#include <linux/sched/task.h>
  12#include <linux/kthread.h>
  13#include <linux/completion.h>
  14#include <linux/err.h>
  15#include <linux/cgroup.h>
  16#include <linux/cpuset.h>
  17#include <linux/unistd.h>
  18#include <linux/file.h>
  19#include <linux/export.h>
  20#include <linux/mutex.h>
  21#include <linux/slab.h>
  22#include <linux/freezer.h>
  23#include <linux/ptrace.h>
  24#include <linux/uaccess.h>
  25#include <linux/numa.h>
  26#include <trace/events/sched.h>
  27
  28static DEFINE_SPINLOCK(kthread_create_lock);
  29static LIST_HEAD(kthread_create_list);
  30struct task_struct *kthreadd_task;
  31
  32struct kthread_create_info
  33{
  34	/* Information passed to kthread() from kthreadd. */
  35	int (*threadfn)(void *data);
  36	void *data;
  37	int node;
  38
  39	/* Result passed back to kthread_create() from kthreadd. */
  40	struct task_struct *result;
  41	struct completion *done;
  42
  43	struct list_head list;
  44};
  45
  46struct kthread {
  47	unsigned long flags;
  48	unsigned int cpu;
  49	void *data;
  50	struct completion parked;
  51	struct completion exited;
  52#ifdef CONFIG_BLK_CGROUP
  53	struct cgroup_subsys_state *blkcg_css;
  54#endif
  55};
  56
  57enum KTHREAD_BITS {
  58	KTHREAD_IS_PER_CPU = 0,
  59	KTHREAD_SHOULD_STOP,
  60	KTHREAD_SHOULD_PARK,
  61};
  62
  63static inline void set_kthread_struct(void *kthread)
  64{
  65	/*
  66	 * We abuse ->set_child_tid to avoid the new member and because it
  67	 * can't be wrongly copied by copy_process(). We also rely on fact
  68	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
  69	 */
  70	current->set_child_tid = (__force void __user *)kthread;
  71}
  72
  73static inline struct kthread *to_kthread(struct task_struct *k)
  74{
  75	WARN_ON(!(k->flags & PF_KTHREAD));
  76	return (__force void *)k->set_child_tid;
  77}
  78
  79void free_kthread_struct(struct task_struct *k)
  80{
  81	struct kthread *kthread;
  82
  83	/*
  84	 * Can be NULL if this kthread was created by kernel_thread()
  85	 * or if kmalloc() in kthread() failed.
  86	 */
  87	kthread = to_kthread(k);
  88#ifdef CONFIG_BLK_CGROUP
  89	WARN_ON_ONCE(kthread && kthread->blkcg_css);
  90#endif
  91	kfree(kthread);
  92}
  93
  94/**
  95 * kthread_should_stop - should this kthread return now?
  96 *
  97 * When someone calls kthread_stop() on your kthread, it will be woken
  98 * and this will return true.  You should then return, and your return
  99 * value will be passed through to kthread_stop().
 100 */
 101bool kthread_should_stop(void)
 102{
 103	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 104}
 105EXPORT_SYMBOL(kthread_should_stop);
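/*
 * Illustrative sketch (not part of kthread.c): a typical thread function
 * polls kthread_should_stop() in its main loop and returns once it reads
 * true; that return value is then handed back to kthread_stop().  The
 * "example_" identifiers below are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/delay.h>

static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then yield the CPU for a while */
		msleep_interruptible(100);
	}
	return 0;	/* becomes kthread_stop()'s return value */
}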
 106
 107bool __kthread_should_park(struct task_struct *k)
 108{
 109	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
 110}
 111EXPORT_SYMBOL_GPL(__kthread_should_park);
 112
 113/**
 114 * kthread_should_park - should this kthread park now?
 115 *
 116 * When someone calls kthread_park() on your kthread, it will be woken
 117 * and this will return true.  You should then do the necessary
 118 * cleanup and call kthread_parkme()
 119 *
 120 * Similar to kthread_should_stop(), but this keeps the thread alive
 121 * and in a park position. kthread_unpark() "restarts" the thread and
 122 * calls the thread function again.
 123 */
 124bool kthread_should_park(void)
 125{
 126	return __kthread_should_park(current);
 127}
 128EXPORT_SYMBOL_GPL(kthread_should_park);
 129
 130/**
 131 * kthread_freezable_should_stop - should this freezable kthread return now?
 132 * @was_frozen: optional out parameter, indicates whether %current was frozen
 133 *
 134 * kthread_should_stop() for freezable kthreads, which will enter
 135 * refrigerator if necessary.  This function is safe from kthread_stop() /
 136 * freezer deadlock and freezable kthreads should use this function instead
 137 * of calling try_to_freeze() directly.
 138 */
 139bool kthread_freezable_should_stop(bool *was_frozen)
 140{
 141	bool frozen = false;
 142
 143	might_sleep();
 144
 145	if (unlikely(freezing(current)))
 146		frozen = __refrigerator(true);
 147
 148	if (was_frozen)
 149		*was_frozen = frozen;
 150
 151	return kthread_should_stop();
 152}
 153EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
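/*
 * Illustrative sketch (not part of kthread.c): a freezable kthread marks
 * itself with set_freezable() and then uses kthread_freezable_should_stop()
 * as its loop condition so it can be frozen safely during suspend.  The
 * "example_" identifiers below are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int example_freezable_threadfn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("example thread was frozen and thawed\n");
		/* do some work, then sleep until there is more to do */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}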
 154
 155/**
 156 * kthread_data - return data value specified on kthread creation
 157 * @task: kthread task in question
 158 *
 159 * Return the data value specified when kthread @task was created.
 160 * The caller is responsible for ensuring the validity of @task when
 161 * calling this function.
 162 */
 163void *kthread_data(struct task_struct *task)
 164{
 165	return to_kthread(task)->data;
 166}
 167
 168/**
 169 * kthread_probe_data - speculative version of kthread_data()
 170 * @task: possible kthread task in question
 171 *
 172 * @task could be a kthread task.  Return the data value specified when it
 173 * was created if accessible.  If @task isn't a kthread task or its data is
 174 * inaccessible for any reason, %NULL is returned.  This function requires
 175 * that @task itself is safe to dereference.
 176 */
 177void *kthread_probe_data(struct task_struct *task)
 178{
 179	struct kthread *kthread = to_kthread(task);
 180	void *data = NULL;
 181
 182	probe_kernel_read(&data, &kthread->data, sizeof(data));
 183	return data;
 184}
 185
 186static void __kthread_parkme(struct kthread *self)
 187{
 188	for (;;) {
 189		/*
 190		 * TASK_PARKED is a special state; we must serialize against
 191		 * possible pending wakeups to avoid store-store collisions on
 192		 * task->state.
 193		 *
 194		 * Such a collision might possibly result in the task state
  195		 * changing from TASK_PARKED and us failing the
 196		 * wait_task_inactive() in kthread_park().
 197		 */
 198		set_special_state(TASK_PARKED);
 199		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 200			break;
 201
 202		complete(&self->parked);
 203		schedule();
 204	}
 205	__set_current_state(TASK_RUNNING);
 206}
 207
 208void kthread_parkme(void)
 209{
 210	__kthread_parkme(to_kthread(current));
 211}
 212EXPORT_SYMBOL_GPL(kthread_parkme);
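/*
 * Illustrative sketch (not part of kthread.c): a park-aware thread function
 * checks kthread_should_park() and calls kthread_parkme(), so that
 * kthread_park()/kthread_unpark() can quiesce and resume it without tearing
 * it down.  The "example_" identifiers below are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_parkable_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			kthread_parkme();	/* sleeps until kthread_unpark() */
			continue;
		}
		/* normal work goes here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}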
 213
 214static int kthread(void *_create)
 215{
 216	/* Copy data: it's on kthread's stack */
 217	struct kthread_create_info *create = _create;
 218	int (*threadfn)(void *data) = create->threadfn;
 219	void *data = create->data;
 220	struct completion *done;
 221	struct kthread *self;
 222	int ret;
 223
 224	self = kzalloc(sizeof(*self), GFP_KERNEL);
 225	set_kthread_struct(self);
 226
 227	/* If user was SIGKILLed, I release the structure. */
 228	done = xchg(&create->done, NULL);
 229	if (!done) {
 230		kfree(create);
 231		do_exit(-EINTR);
 232	}
 233
 234	if (!self) {
 235		create->result = ERR_PTR(-ENOMEM);
 236		complete(done);
 237		do_exit(-ENOMEM);
 238	}
 239
 240	self->data = data;
 241	init_completion(&self->exited);
 242	init_completion(&self->parked);
 243	current->vfork_done = &self->exited;
 244
 245	/* OK, tell user we're spawned, wait for stop or wakeup */
 246	__set_current_state(TASK_UNINTERRUPTIBLE);
 247	create->result = current;
 248	complete(done);
 249	schedule();
 250
 251	ret = -EINTR;
 252	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 253		cgroup_kthread_ready();
 254		__kthread_parkme(self);
 255		ret = threadfn(data);
 256	}
 257	do_exit(ret);
 258}
 259
 260/* called from do_fork() to get node information for about to be created task */
 261int tsk_fork_get_node(struct task_struct *tsk)
 262{
 263#ifdef CONFIG_NUMA
 264	if (tsk == kthreadd_task)
 265		return tsk->pref_node_fork;
 266#endif
 267	return NUMA_NO_NODE;
 268}
 269
 270static void create_kthread(struct kthread_create_info *create)
 271{
 272	int pid;
 273
 274#ifdef CONFIG_NUMA
 275	current->pref_node_fork = create->node;
 276#endif
 277	/* We want our own signal handler (we take no signals by default). */
 278	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
 279	if (pid < 0) {
 280		/* If user was SIGKILLed, I release the structure. */
 281		struct completion *done = xchg(&create->done, NULL);
 282
 283		if (!done) {
 284			kfree(create);
 285			return;
 286		}
 287		create->result = ERR_PTR(pid);
 288		complete(done);
 289	}
 290}
 291
 292static __printf(4, 0)
 293struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 294						    void *data, int node,
 295						    const char namefmt[],
 296						    va_list args)
 297{
 298	DECLARE_COMPLETION_ONSTACK(done);
 299	struct task_struct *task;
 300	struct kthread_create_info *create = kmalloc(sizeof(*create),
 301						     GFP_KERNEL);
 302
 303	if (!create)
 304		return ERR_PTR(-ENOMEM);
 305	create->threadfn = threadfn;
 306	create->data = data;
 307	create->node = node;
 308	create->done = &done;
 309
 310	spin_lock(&kthread_create_lock);
 311	list_add_tail(&create->list, &kthread_create_list);
 312	spin_unlock(&kthread_create_lock);
 313
 314	wake_up_process(kthreadd_task);
 315	/*
 316	 * Wait for completion in killable state, for I might be chosen by
 317	 * the OOM killer while kthreadd is trying to allocate memory for
 318	 * new kernel thread.
 319	 */
 320	if (unlikely(wait_for_completion_killable(&done))) {
 321		/*
 322		 * If I was SIGKILLed before kthreadd (or new kernel thread)
 323		 * calls complete(), leave the cleanup of this structure to
 324		 * that thread.
 325		 */
 326		if (xchg(&create->done, NULL))
 327			return ERR_PTR(-EINTR);
 328		/*
 329		 * kthreadd (or new kernel thread) will call complete()
 330		 * shortly.
 331		 */
 332		wait_for_completion(&done);
 333	}
 334	task = create->result;
 335	if (!IS_ERR(task)) {
 336		static const struct sched_param param = { .sched_priority = 0 };
 337		char name[TASK_COMM_LEN];
 338
 339		/*
 340		 * task is already visible to other tasks, so updating
 341		 * COMM must be protected.
 342		 */
 343		vsnprintf(name, sizeof(name), namefmt, args);
 344		set_task_comm(task, name);
 345		/*
 346		 * root may have changed our (kthreadd's) priority or CPU mask.
 347		 * The kernel thread should not inherit these properties.
 348		 */
 349		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
 350		set_cpus_allowed_ptr(task, cpu_all_mask);
 351	}
 352	kfree(create);
 353	return task;
 354}
 355
 356/**
 357 * kthread_create_on_node - create a kthread.
 358 * @threadfn: the function to run until signal_pending(current).
 359 * @data: data ptr for @threadfn.
 360 * @node: task and thread structures for the thread are allocated on this node
 361 * @namefmt: printf-style name for the thread.
 362 *
 363 * Description: This helper function creates and names a kernel
 364 * thread.  The thread will be stopped: use wake_up_process() to start
 365 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 366 * is affine to all CPUs.
 367 *
 368 * If thread is going to be bound on a particular cpu, give its node
 369 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 370 * When woken, the thread will run @threadfn() with @data as its
 371 * argument. @threadfn() can either call do_exit() directly if it is a
 372 * standalone thread for which no one will call kthread_stop(), or
 373 * return when 'kthread_should_stop()' is true (which means
 374 * kthread_stop() has been called).  The return value should be zero
 375 * or a negative error number; it will be passed to kthread_stop().
 376 *
 377 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 378 */
 379struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 380					   void *data, int node,
 381					   const char namefmt[],
 382					   ...)
 383{
 384	struct task_struct *task;
 385	va_list args;
 386
 387	va_start(args, namefmt);
 388	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 389	va_end(args);
 390
 391	return task;
 392}
 393EXPORT_SYMBOL(kthread_create_on_node);
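/*
 * Illustrative sketch (not part of kthread.c): kthread_create_on_node()
 * returns a stopped thread; the caller can still bind or renice it and then
 * starts it with wake_up_process().  kthread_run() wraps exactly this
 * create-then-wake sequence.  The "example_" identifiers are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/numa.h>

static struct task_struct *example_start_thread(int (*fn)(void *), void *arg)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(fn, arg, NUMA_NO_NODE, "example/%d", 0);
	if (IS_ERR(tsk))
		return tsk;		/* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

	wake_up_process(tsk);		/* the thread now runs fn(arg) */
	return tsk;
}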
 394
 395static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 396{
 397	unsigned long flags;
 398
 399	if (!wait_task_inactive(p, state)) {
 400		WARN_ON(1);
 401		return;
 402	}
 403
 404	/* It's safe because the task is inactive. */
 405	raw_spin_lock_irqsave(&p->pi_lock, flags);
 406	do_set_cpus_allowed(p, mask);
 407	p->flags |= PF_NO_SETAFFINITY;
 408	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 409}
 410
 411static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 412{
 413	__kthread_bind_mask(p, cpumask_of(cpu), state);
 414}
 415
 416void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 417{
 418	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 419}
 420
 421/**
 422 * kthread_bind - bind a just-created kthread to a cpu.
 423 * @p: thread created by kthread_create().
 424 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 425 *
 426 * Description: This function is equivalent to set_cpus_allowed(),
 427 * except that @cpu doesn't need to be online, and the thread must be
 428 * stopped (i.e., just returned from kthread_create()).
 429 */
 430void kthread_bind(struct task_struct *p, unsigned int cpu)
 431{
 432	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 433}
 434EXPORT_SYMBOL(kthread_bind);
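/*
 * Illustrative sketch (not part of kthread.c): kthread_bind() may only be
 * used on a thread that has not started running yet, i.e. right after
 * kthread_create() and before the first wake_up_process().  The "example_"
 * identifiers are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *example_start_on_cpu(int (*fn)(void *), void *arg,
						unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(fn, arg, "example_bound/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	kthread_bind(tsk, cpu);		/* pin before the first wakeup */
	wake_up_process(tsk);
	return tsk;
}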
 435
 436/**
 437 * kthread_create_on_cpu - Create a cpu bound kthread
 438 * @threadfn: the function to run until signal_pending(current).
 439 * @data: data ptr for @threadfn.
 440 * @cpu: The cpu on which the thread should be bound,
 441 * @namefmt: printf-style name for the thread. Format is restricted
 442 *	     to "name.*%u". Code fills in cpu number.
 443 *
 444 * Description: This helper function creates and names a kernel thread
 445 * The thread will be woken and put into park mode.
 446 */
 447struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 448					  void *data, unsigned int cpu,
 449					  const char *namefmt)
 450{
 451	struct task_struct *p;
 452
 453	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 454				   cpu);
 455	if (IS_ERR(p))
 456		return p;
 457	kthread_bind(p, cpu);
 458	/* CPU hotplug need to bind once again when unparking the thread. */
 459	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
 460	to_kthread(p)->cpu = cpu;
 461	return p;
 462}
 463
 464/**
 465 * kthread_unpark - unpark a thread created by kthread_create().
 466 * @k:		thread created by kthread_create().
 467 *
 468 * Sets kthread_should_park() for @k to return false, wakes it, and
 469 * waits for it to return. If the thread is marked percpu then its
 470 * bound to the cpu again.
 471 */
 472void kthread_unpark(struct task_struct *k)
 473{
 474	struct kthread *kthread = to_kthread(k);
 475
 476	/*
 477	 * Newly created kthread was parked when the CPU was offline.
 478	 * The binding was lost and we need to set it again.
 479	 */
 480	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 481		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 482
 483	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 484	/*
 485	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 486	 */
 487	wake_up_state(k, TASK_PARKED);
 488}
 489EXPORT_SYMBOL_GPL(kthread_unpark);
 490
 491/**
 492 * kthread_park - park a thread created by kthread_create().
 493 * @k: thread created by kthread_create().
 494 *
 495 * Sets kthread_should_park() for @k to return true, wakes it, and
 496 * waits for it to return. This can also be called after kthread_create()
 497 * instead of calling wake_up_process(): the thread will park without
 498 * calling threadfn().
 499 *
 500 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 501 * If called by the kthread itself just the park bit is set.
 502 */
 503int kthread_park(struct task_struct *k)
 504{
 505	struct kthread *kthread = to_kthread(k);
 506
 507	if (WARN_ON(k->flags & PF_EXITING))
 508		return -ENOSYS;
 509
 510	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
 511		return -EBUSY;
 512
 513	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 514	if (k != current) {
 515		wake_up_process(k);
 516		/*
 517		 * Wait for __kthread_parkme() to complete(), this means we
 518		 * _will_ have TASK_PARKED and are about to call schedule().
 519		 */
 520		wait_for_completion(&kthread->parked);
 521		/*
 522		 * Now wait for that schedule() to complete and the task to
 523		 * get scheduled out.
 524		 */
 525		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
 526	}
 527
 528	return 0;
 529}
 530EXPORT_SYMBOL_GPL(kthread_park);
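/*
 * Illustrative sketch (not part of kthread.c): kthread_park() is typically
 * used to quiesce a long-running thread around a critical reconfiguration,
 * and kthread_unpark() lets it continue afterwards, avoiding a full stop and
 * re-create cycle.  The thread function must cooperate as in the
 * example_parkable_threadfn() sketch above.  "example_" names are
 * hypothetical.
 */
#include <linux/kthread.h>

static int example_reconfigure(struct task_struct *worker)
{
	int ret = kthread_park(worker);	/* returns once the thread is parked */

	if (ret)
		return ret;		/* -ENOSYS if the thread already exited */

	/* the worker is guaranteed not to be running its loop body here */

	kthread_unpark(worker);		/* the thread resumes its loop */
	return 0;
}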
 531
 532/**
 533 * kthread_stop - stop a thread created by kthread_create().
 534 * @k: thread created by kthread_create().
 535 *
 536 * Sets kthread_should_stop() for @k to return true, wakes it, and
 537 * waits for it to exit. This can also be called after kthread_create()
 538 * instead of calling wake_up_process(): the thread will exit without
 539 * calling threadfn().
 540 *
 541 * If threadfn() may call do_exit() itself, the caller must ensure
 542 * task_struct can't go away.
 543 *
 544 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 545 * was never called.
 546 */
 547int kthread_stop(struct task_struct *k)
 548{
 549	struct kthread *kthread;
 550	int ret;
 551
 552	trace_sched_kthread_stop(k);
 553
 554	get_task_struct(k);
 555	kthread = to_kthread(k);
 556	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 557	kthread_unpark(k);
 558	wake_up_process(k);
 559	wait_for_completion(&kthread->exited);
 560	ret = k->exit_code;
 561	put_task_struct(k);
 562
 563	trace_sched_kthread_stop_ret(ret);
 564	return ret;
 565}
 566EXPORT_SYMBOL(kthread_stop);
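/*
 * Illustrative sketch (not part of kthread.c): kthread_stop() wakes the
 * thread, waits for it to exit and returns the value the thread function
 * returned, or -EINTR if the thread was never woken and so never ran it.
 * The "example_" identifiers are hypothetical.
 */
#include <linux/kthread.h>

static void example_shutdown(struct task_struct *worker)
{
	int ret = kthread_stop(worker);	/* blocks until the thread has exited */

	if (ret && ret != -EINTR)
		pr_warn("example worker exited with error %d\n", ret);
}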
 567
 568int kthreadd(void *unused)
 569{
 570	struct task_struct *tsk = current;
 571
 572	/* Setup a clean context for our children to inherit. */
 573	set_task_comm(tsk, "kthreadd");
 574	ignore_signals(tsk);
 575	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 576	set_mems_allowed(node_states[N_MEMORY]);
 577
 578	current->flags |= PF_NOFREEZE;
 579	cgroup_init_kthreadd();
 580
 581	for (;;) {
 582		set_current_state(TASK_INTERRUPTIBLE);
 583		if (list_empty(&kthread_create_list))
 584			schedule();
 585		__set_current_state(TASK_RUNNING);
 586
 587		spin_lock(&kthread_create_lock);
 588		while (!list_empty(&kthread_create_list)) {
 589			struct kthread_create_info *create;
 590
 591			create = list_entry(kthread_create_list.next,
 592					    struct kthread_create_info, list);
 593			list_del_init(&create->list);
 594			spin_unlock(&kthread_create_lock);
 595
 596			create_kthread(create);
 597
 598			spin_lock(&kthread_create_lock);
 599		}
 600		spin_unlock(&kthread_create_lock);
 601	}
 602
 603	return 0;
 604}
 605
 606void __kthread_init_worker(struct kthread_worker *worker,
 607				const char *name,
 608				struct lock_class_key *key)
 609{
 610	memset(worker, 0, sizeof(struct kthread_worker));
 611	raw_spin_lock_init(&worker->lock);
 612	lockdep_set_class_and_name(&worker->lock, key, name);
 613	INIT_LIST_HEAD(&worker->work_list);
 614	INIT_LIST_HEAD(&worker->delayed_work_list);
 615}
 616EXPORT_SYMBOL_GPL(__kthread_init_worker);
 617
 618/**
 619 * kthread_worker_fn - kthread function to process kthread_worker
 620 * @worker_ptr: pointer to initialized kthread_worker
 621 *
 622 * This function implements the main cycle of kthread worker. It processes
 623 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 624 * is empty.
 625 *
 626 * The works are not allowed to keep any locks, disable preemption or interrupts
 627 * when they finish. There is defined a safe point for freezing when one work
 628 * finishes and before a new one is started.
 629 *
 630 * Also the works must not be handled by more than one worker at the same time,
 631 * see also kthread_queue_work().
 632 */
 633int kthread_worker_fn(void *worker_ptr)
 634{
 635	struct kthread_worker *worker = worker_ptr;
 636	struct kthread_work *work;
 637
 638	/*
 639	 * FIXME: Update the check and remove the assignment when all kthread
 640	 * worker users are created using kthread_create_worker*() functions.
 641	 */
 642	WARN_ON(worker->task && worker->task != current);
 643	worker->task = current;
 644
 645	if (worker->flags & KTW_FREEZABLE)
 646		set_freezable();
 647
 648repeat:
 649	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 650
 651	if (kthread_should_stop()) {
 652		__set_current_state(TASK_RUNNING);
 653		raw_spin_lock_irq(&worker->lock);
 654		worker->task = NULL;
 655		raw_spin_unlock_irq(&worker->lock);
 656		return 0;
 657	}
 658
 659	work = NULL;
 660	raw_spin_lock_irq(&worker->lock);
 661	if (!list_empty(&worker->work_list)) {
 662		work = list_first_entry(&worker->work_list,
 663					struct kthread_work, node);
 664		list_del_init(&work->node);
 665	}
 666	worker->current_work = work;
 667	raw_spin_unlock_irq(&worker->lock);
 668
 669	if (work) {
 670		__set_current_state(TASK_RUNNING);
 671		work->func(work);
 672	} else if (!freezing(current))
 673		schedule();
 674
 675	try_to_freeze();
 676	cond_resched();
 677	goto repeat;
 678}
 679EXPORT_SYMBOL_GPL(kthread_worker_fn);
 680
 681static __printf(3, 0) struct kthread_worker *
 682__kthread_create_worker(int cpu, unsigned int flags,
 683			const char namefmt[], va_list args)
 684{
 685	struct kthread_worker *worker;
 686	struct task_struct *task;
 687	int node = NUMA_NO_NODE;
 688
 689	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 690	if (!worker)
 691		return ERR_PTR(-ENOMEM);
 692
 693	kthread_init_worker(worker);
 694
 695	if (cpu >= 0)
 696		node = cpu_to_node(cpu);
 697
 698	task = __kthread_create_on_node(kthread_worker_fn, worker,
 699						node, namefmt, args);
 700	if (IS_ERR(task))
 701		goto fail_task;
 702
 703	if (cpu >= 0)
 704		kthread_bind(task, cpu);
 705
 706	worker->flags = flags;
 707	worker->task = task;
 708	wake_up_process(task);
 709	return worker;
 710
 711fail_task:
 712	kfree(worker);
 713	return ERR_CAST(task);
 714}
 715
 716/**
 717 * kthread_create_worker - create a kthread worker
 718 * @flags: flags modifying the default behavior of the worker
 719 * @namefmt: printf-style name for the kthread worker (task).
 720 *
 721 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 722 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 723 * when the worker was SIGKILLed.
 724 */
 725struct kthread_worker *
 726kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 727{
 728	struct kthread_worker *worker;
 729	va_list args;
 730
 731	va_start(args, namefmt);
 732	worker = __kthread_create_worker(-1, flags, namefmt, args);
 733	va_end(args);
 734
 735	return worker;
 736}
 737EXPORT_SYMBOL(kthread_create_worker);
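/*
 * Illustrative sketch (not part of kthread.c): a user that needs strictly
 * ordered, single-threaded execution of work items can create a dedicated
 * worker and queue kthread_work items to it.  The "example_" identifiers
 * are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>

static void example_work_fn(struct kthread_work *work)
{
	/* runs in the context of the dedicated worker thread */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static struct kthread_worker *example_setup_worker(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return worker;		/* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

	kthread_queue_work(worker, &example_work);
	return worker;
}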
 738
 739/**
  740 * kthread_create_worker_on_cpu - create a kthread worker and bind it
  741 *	to a given CPU and the associated NUMA node.
 742 * @cpu: CPU number
 743 * @flags: flags modifying the default behavior of the worker
 744 * @namefmt: printf-style name for the kthread worker (task).
 745 *
 746 * Use a valid CPU number if you want to bind the kthread worker
 747 * to the given CPU and the associated NUMA node.
 748 *
 749 * A good practice is to add the cpu number also into the worker name.
 750 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 751 *
 752 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 753 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 754 * when the worker was SIGKILLed.
 755 */
 756struct kthread_worker *
 757kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 758			     const char namefmt[], ...)
 759{
 760	struct kthread_worker *worker;
 761	va_list args;
 762
 763	va_start(args, namefmt);
 764	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 765	va_end(args);
 766
 767	return worker;
 768}
 769EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 770
 771/*
 772 * Returns true when the work could not be queued at the moment.
 773 * It happens when it is already pending in a worker list
 774 * or when it is being cancelled.
 775 */
 776static inline bool queuing_blocked(struct kthread_worker *worker,
 777				   struct kthread_work *work)
 778{
 779	lockdep_assert_held(&worker->lock);
 780
 781	return !list_empty(&work->node) || work->canceling;
 782}
 783
 784static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 785					     struct kthread_work *work)
 786{
 787	lockdep_assert_held(&worker->lock);
 788	WARN_ON_ONCE(!list_empty(&work->node));
 789	/* Do not use a work with >1 worker, see kthread_queue_work() */
 790	WARN_ON_ONCE(work->worker && work->worker != worker);
 791}
 792
 793/* insert @work before @pos in @worker */
 794static void kthread_insert_work(struct kthread_worker *worker,
 795				struct kthread_work *work,
 796				struct list_head *pos)
 797{
 798	kthread_insert_work_sanity_check(worker, work);
 799
 800	list_add_tail(&work->node, pos);
 801	work->worker = worker;
 802	if (!worker->current_work && likely(worker->task))
 803		wake_up_process(worker->task);
 804}
 805
 806/**
 807 * kthread_queue_work - queue a kthread_work
 808 * @worker: target kthread_worker
 809 * @work: kthread_work to queue
 810 *
 811 * Queue @work to work processor @task for async execution.  @task
 812 * must have been created with kthread_worker_create().  Returns %true
 813 * if @work was successfully queued, %false if it was already pending.
 814 *
 815 * Reinitialize the work if it needs to be used by another worker.
 816 * For example, when the worker was stopped and started again.
 817 */
 818bool kthread_queue_work(struct kthread_worker *worker,
 819			struct kthread_work *work)
 820{
 821	bool ret = false;
 822	unsigned long flags;
 823
 824	raw_spin_lock_irqsave(&worker->lock, flags);
 825	if (!queuing_blocked(worker, work)) {
 826		kthread_insert_work(worker, work, &worker->work_list);
 827		ret = true;
 828	}
 829	raw_spin_unlock_irqrestore(&worker->lock, flags);
 830	return ret;
 831}
 832EXPORT_SYMBOL_GPL(kthread_queue_work);
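/*
 * Illustrative sketch (not part of kthread.c): a kthread_work is usually
 * embedded in a driver structure and initialized with kthread_init_work();
 * the handler recovers its container with container_of().  The "example_"
 * identifiers are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/kernel.h>

struct example_device {
	struct kthread_worker *worker;
	struct kthread_work irq_work;
	/* ... device state ... */
};

static void example_irq_work_fn(struct kthread_work *work)
{
	struct example_device *dev =
		container_of(work, struct example_device, irq_work);

	/* process dev outside of hard-irq context */
}

static void example_device_init(struct example_device *dev)
{
	kthread_init_work(&dev->irq_work, example_irq_work_fn);
}

static void example_device_kick(struct example_device *dev)
{
	/* returns false if the work was already pending on the worker */
	kthread_queue_work(dev->worker, &dev->irq_work);
}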
 833
 834/**
 835 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 836 *	delayed work when the timer expires.
 837 * @t: pointer to the expired timer
 838 *
 839 * The format of the function is defined by struct timer_list.
 840 * It should have been called from irqsafe timer with irq already off.
 841 */
 842void kthread_delayed_work_timer_fn(struct timer_list *t)
 843{
 844	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
 845	struct kthread_work *work = &dwork->work;
 846	struct kthread_worker *worker = work->worker;
 847	unsigned long flags;
 848
 849	/*
 850	 * This might happen when a pending work is reinitialized.
 851	 * It means that it is used a wrong way.
 852	 */
 853	if (WARN_ON_ONCE(!worker))
 854		return;
 855
 856	raw_spin_lock_irqsave(&worker->lock, flags);
 857	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 858	WARN_ON_ONCE(work->worker != worker);
 859
 860	/* Move the work from worker->delayed_work_list. */
 861	WARN_ON_ONCE(list_empty(&work->node));
 862	list_del_init(&work->node);
 863	kthread_insert_work(worker, work, &worker->work_list);
 864
 865	raw_spin_unlock_irqrestore(&worker->lock, flags);
 866}
 867EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 868
 869static void __kthread_queue_delayed_work(struct kthread_worker *worker,
 870					 struct kthread_delayed_work *dwork,
 871					 unsigned long delay)
 872{
 873	struct timer_list *timer = &dwork->timer;
 874	struct kthread_work *work = &dwork->work;
 875
 876	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 877
 878	/*
 879	 * If @delay is 0, queue @dwork->work immediately.  This is for
 880	 * both optimization and correctness.  The earliest @timer can
 881	 * expire is on the closest next tick and delayed_work users depend
 882	 * on that there's no such delay when @delay is 0.
 883	 */
 884	if (!delay) {
 885		kthread_insert_work(worker, work, &worker->work_list);
 886		return;
 887	}
 888
 889	/* Be paranoid and try to detect possible races already now. */
 890	kthread_insert_work_sanity_check(worker, work);
 891
 892	list_add(&work->node, &worker->delayed_work_list);
 893	work->worker = worker;
 894	timer->expires = jiffies + delay;
 895	add_timer(timer);
 896}
 897
 898/**
 899 * kthread_queue_delayed_work - queue the associated kthread work
 900 *	after a delay.
 901 * @worker: target kthread_worker
 902 * @dwork: kthread_delayed_work to queue
 903 * @delay: number of jiffies to wait before queuing
 904 *
 905 * If the work has not been pending it starts a timer that will queue
 906 * the work after the given @delay. If @delay is zero, it queues the
 907 * work immediately.
 908 *
 909 * Return: %false if the @work has already been pending. It means that
 910 * either the timer was running or the work was queued. It returns %true
 911 * otherwise.
 912 */
 913bool kthread_queue_delayed_work(struct kthread_worker *worker,
 914				struct kthread_delayed_work *dwork,
 915				unsigned long delay)
 916{
 917	struct kthread_work *work = &dwork->work;
 918	unsigned long flags;
 919	bool ret = false;
 920
 921	raw_spin_lock_irqsave(&worker->lock, flags);
 922
 923	if (!queuing_blocked(worker, work)) {
 924		__kthread_queue_delayed_work(worker, dwork, delay);
 925		ret = true;
 926	}
 927
 928	raw_spin_unlock_irqrestore(&worker->lock, flags);
 929	return ret;
 930}
 931EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
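/*
 * Illustrative sketch (not part of kthread.c): delayed work is declared as a
 * struct kthread_delayed_work, initialized with kthread_init_delayed_work()
 * and scheduled with a delay in jiffies; kthread_mod_delayed_work() can later
 * re-arm it.  The "example_" identifiers are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/jiffies.h>

static void example_timeout_fn(struct kthread_work *work)
{
	/* runs in the worker thread once the delay has elapsed */
}

static void example_arm_timeout(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork)
{
	kthread_init_delayed_work(dwork, example_timeout_fn);
	/* queue roughly one second from now */
	kthread_queue_delayed_work(worker, dwork, msecs_to_jiffies(1000));
}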
 932
 933struct kthread_flush_work {
 934	struct kthread_work	work;
 935	struct completion	done;
 936};
 937
 938static void kthread_flush_work_fn(struct kthread_work *work)
 939{
 940	struct kthread_flush_work *fwork =
 941		container_of(work, struct kthread_flush_work, work);
 942	complete(&fwork->done);
 943}
 944
 945/**
 946 * kthread_flush_work - flush a kthread_work
 947 * @work: work to flush
 948 *
 949 * If @work is queued or executing, wait for it to finish execution.
 950 */
 951void kthread_flush_work(struct kthread_work *work)
 952{
 953	struct kthread_flush_work fwork = {
 954		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 955		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 956	};
 957	struct kthread_worker *worker;
 958	bool noop = false;
 959
 960	worker = work->worker;
 961	if (!worker)
 962		return;
 963
 964	raw_spin_lock_irq(&worker->lock);
 965	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 966	WARN_ON_ONCE(work->worker != worker);
 967
 968	if (!list_empty(&work->node))
 969		kthread_insert_work(worker, &fwork.work, work->node.next);
 970	else if (worker->current_work == work)
 971		kthread_insert_work(worker, &fwork.work,
 972				    worker->work_list.next);
 973	else
 974		noop = true;
 975
 976	raw_spin_unlock_irq(&worker->lock);
 977
 978	if (!noop)
 979		wait_for_completion(&fwork.done);
 980}
 981EXPORT_SYMBOL_GPL(kthread_flush_work);
 982
 983/*
 984 * This function removes the work from the worker queue. Also it makes sure
 985 * that it won't get queued later via the delayed work's timer.
 986 *
 987 * The work might still be in use when this function finishes. See the
  988 * current_work processed by the worker.
 989 *
 990 * Return: %true if @work was pending and successfully canceled,
 991 *	%false if @work was not pending
 992 */
 993static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 994				  unsigned long *flags)
 995{
 996	/* Try to cancel the timer if exists. */
 997	if (is_dwork) {
 998		struct kthread_delayed_work *dwork =
 999			container_of(work, struct kthread_delayed_work, work);
1000		struct kthread_worker *worker = work->worker;
1001
1002		/*
1003		 * del_timer_sync() must be called to make sure that the timer
 1004		 * callback is not running. The lock must be temporarily released
1005		 * to avoid a deadlock with the callback. In the meantime,
1006		 * any queuing is blocked by setting the canceling counter.
1007		 */
1008		work->canceling++;
1009		raw_spin_unlock_irqrestore(&worker->lock, *flags);
1010		del_timer_sync(&dwork->timer);
1011		raw_spin_lock_irqsave(&worker->lock, *flags);
1012		work->canceling--;
1013	}
1014
1015	/*
1016	 * Try to remove the work from a worker list. It might either
1017	 * be from worker->work_list or from worker->delayed_work_list.
1018	 */
1019	if (!list_empty(&work->node)) {
1020		list_del_init(&work->node);
1021		return true;
1022	}
1023
1024	return false;
1025}
1026
1027/**
1028 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1029 * @worker: kthread worker to use
1030 * @dwork: kthread delayed work to queue
1031 * @delay: number of jiffies to wait before queuing
1032 *
1033 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1034 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1035 * @work is guaranteed to be queued immediately.
1036 *
1037 * Return: %true if @dwork was pending and its timer was modified,
1038 * %false otherwise.
1039 *
1040 * A special case is when the work is being canceled in parallel.
1041 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1042 * or yet another kthread_mod_delayed_work() call. We let the other command
1043 * win and return %false here. The caller is supposed to synchronize these
 1044 * operations in a reasonable way.
1045 *
1046 * This function is safe to call from any context including IRQ handler.
1047 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1048 * for details.
1049 */
1050bool kthread_mod_delayed_work(struct kthread_worker *worker,
1051			      struct kthread_delayed_work *dwork,
1052			      unsigned long delay)
1053{
1054	struct kthread_work *work = &dwork->work;
1055	unsigned long flags;
1056	int ret = false;
1057
1058	raw_spin_lock_irqsave(&worker->lock, flags);
1059
1060	/* Do not bother with canceling when never queued. */
1061	if (!work->worker)
1062		goto fast_queue;
1063
1064	/* Work must not be used with >1 worker, see kthread_queue_work() */
1065	WARN_ON_ONCE(work->worker != worker);
1066
1067	/* Do not fight with another command that is canceling this work. */
1068	if (work->canceling)
1069		goto out;
1070
1071	ret = __kthread_cancel_work(work, true, &flags);
1072fast_queue:
1073	__kthread_queue_delayed_work(worker, dwork, delay);
1074out:
1075	raw_spin_unlock_irqrestore(&worker->lock, flags);
1076	return ret;
1077}
1078EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1079
1080static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1081{
1082	struct kthread_worker *worker = work->worker;
1083	unsigned long flags;
1084	int ret = false;
1085
1086	if (!worker)
1087		goto out;
1088
1089	raw_spin_lock_irqsave(&worker->lock, flags);
1090	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1091	WARN_ON_ONCE(work->worker != worker);
1092
1093	ret = __kthread_cancel_work(work, is_dwork, &flags);
1094
1095	if (worker->current_work != work)
1096		goto out_fast;
1097
1098	/*
1099	 * The work is in progress and we need to wait with the lock released.
1100	 * In the meantime, block any queuing by setting the canceling counter.
1101	 */
1102	work->canceling++;
1103	raw_spin_unlock_irqrestore(&worker->lock, flags);
1104	kthread_flush_work(work);
1105	raw_spin_lock_irqsave(&worker->lock, flags);
1106	work->canceling--;
1107
1108out_fast:
1109	raw_spin_unlock_irqrestore(&worker->lock, flags);
1110out:
1111	return ret;
1112}
1113
1114/**
1115 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1116 * @work: the kthread work to cancel
1117 *
1118 * Cancel @work and wait for its execution to finish.  This function
1119 * can be used even if the work re-queues itself. On return from this
1120 * function, @work is guaranteed to be not pending or executing on any CPU.
1121 *
1122 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1123 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1124 *
1125 * The caller must ensure that the worker on which @work was last
1126 * queued can't be destroyed before this function returns.
1127 *
1128 * Return: %true if @work was pending, %false otherwise.
1129 */
1130bool kthread_cancel_work_sync(struct kthread_work *work)
1131{
1132	return __kthread_cancel_work_sync(work, false);
1133}
1134EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1135
1136/**
1137 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1138 *	wait for it to finish.
1139 * @dwork: the kthread delayed work to cancel
1140 *
1141 * This is kthread_cancel_work_sync() for delayed works.
1142 *
1143 * Return: %true if @dwork was pending, %false otherwise.
1144 */
1145bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1146{
1147	return __kthread_cancel_work_sync(&dwork->work, true);
1148}
1149EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1150
1151/**
1152 * kthread_flush_worker - flush all current works on a kthread_worker
1153 * @worker: worker to flush
1154 *
1155 * Wait until all currently executing or pending works on @worker are
1156 * finished.
1157 */
1158void kthread_flush_worker(struct kthread_worker *worker)
1159{
1160	struct kthread_flush_work fwork = {
1161		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1162		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1163	};
1164
1165	kthread_queue_work(worker, &fwork.work);
1166	wait_for_completion(&fwork.done);
1167}
1168EXPORT_SYMBOL_GPL(kthread_flush_worker);
1169
1170/**
1171 * kthread_destroy_worker - destroy a kthread worker
1172 * @worker: worker to be destroyed
1173 *
1174 * Flush and destroy @worker.  The simple flush is enough because the kthread
1175 * worker API is used only in trivial scenarios.  There are no multi-step state
1176 * machines needed.
1177 */
1178void kthread_destroy_worker(struct kthread_worker *worker)
1179{
1180	struct task_struct *task;
1181
1182	task = worker->task;
1183	if (WARN_ON(!task))
1184		return;
1185
1186	kthread_flush_worker(worker);
1187	kthread_stop(task);
1188	WARN_ON(!list_empty(&worker->work_list));
1189	kfree(worker);
1190}
1191EXPORT_SYMBOL(kthread_destroy_worker);
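/*
 * Illustrative sketch (not part of kthread.c): typical teardown cancels any
 * outstanding delayed work so its timer cannot requeue it, then destroys the
 * worker, which flushes the remaining work and stops the underlying kthread.
 * The "example_" identifiers are hypothetical.
 */
#include <linux/kthread.h>

static void example_teardown(struct kthread_worker *worker,
			     struct kthread_delayed_work *dwork)
{
	kthread_cancel_delayed_work_sync(dwork);
	kthread_destroy_worker(worker);
}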
1192
1193#ifdef CONFIG_BLK_CGROUP
1194/**
1195 * kthread_associate_blkcg - associate blkcg to current kthread
1196 * @css: the cgroup info
1197 *
1198 * Current thread must be a kthread. The thread is running jobs on behalf of
 1199 * other threads. In some cases, we expect the jobs to attach cgroup info of
1200 * original threads instead of that of current thread. This function stores
1201 * original thread's cgroup info in current kthread context for later
1202 * retrieval.
1203 */
1204void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1205{
1206	struct kthread *kthread;
1207
1208	if (!(current->flags & PF_KTHREAD))
1209		return;
1210	kthread = to_kthread(current);
1211	if (!kthread)
1212		return;
1213
1214	if (kthread->blkcg_css) {
1215		css_put(kthread->blkcg_css);
1216		kthread->blkcg_css = NULL;
1217	}
1218	if (css) {
1219		css_get(css);
1220		kthread->blkcg_css = css;
1221	}
1222}
1223EXPORT_SYMBOL(kthread_associate_blkcg);
1224
1225/**
1226 * kthread_blkcg - get associated blkcg css of current kthread
1227 *
1228 * Current thread must be a kthread.
1229 */
1230struct cgroup_subsys_state *kthread_blkcg(void)
1231{
1232	struct kthread *kthread;
1233
1234	if (current->flags & PF_KTHREAD) {
1235		kthread = to_kthread(current);
1236		if (kthread)
1237			return kthread->blkcg_css;
1238	}
1239	return NULL;
1240}
1241EXPORT_SYMBOL(kthread_blkcg);
1242#endif
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Kernel thread helper functions.
   3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   4 *   Copyright (C) 2009 Red Hat, Inc.
   5 *
   6 * Creation is done via kthreadd, so that we get a clean environment
   7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
   8 * etc.).
   9 */
  10#include <uapi/linux/sched/types.h>
  11#include <linux/mm.h>
  12#include <linux/mmu_context.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/task.h>
  16#include <linux/kthread.h>
  17#include <linux/completion.h>
  18#include <linux/err.h>
  19#include <linux/cgroup.h>
  20#include <linux/cpuset.h>
  21#include <linux/unistd.h>
  22#include <linux/file.h>
  23#include <linux/export.h>
  24#include <linux/mutex.h>
  25#include <linux/slab.h>
  26#include <linux/freezer.h>
  27#include <linux/ptrace.h>
  28#include <linux/uaccess.h>
  29#include <linux/numa.h>
  30#include <linux/sched/isolation.h>
  31#include <trace/events/sched.h>
  32
  33
  34static DEFINE_SPINLOCK(kthread_create_lock);
  35static LIST_HEAD(kthread_create_list);
  36struct task_struct *kthreadd_task;
  37
  38struct kthread_create_info
  39{
  40	/* Information passed to kthread() from kthreadd. */
  41	char *full_name;
  42	int (*threadfn)(void *data);
  43	void *data;
  44	int node;
  45
  46	/* Result passed back to kthread_create() from kthreadd. */
  47	struct task_struct *result;
  48	struct completion *done;
  49
  50	struct list_head list;
  51};
  52
  53struct kthread {
  54	unsigned long flags;
  55	unsigned int cpu;
  56	int result;
  57	int (*threadfn)(void *);
  58	void *data;
  59	struct completion parked;
  60	struct completion exited;
  61#ifdef CONFIG_BLK_CGROUP
  62	struct cgroup_subsys_state *blkcg_css;
  63#endif
  64	/* To store the full name if task comm is truncated. */
  65	char *full_name;
  66};
  67
  68enum KTHREAD_BITS {
  69	KTHREAD_IS_PER_CPU = 0,
  70	KTHREAD_SHOULD_STOP,
  71	KTHREAD_SHOULD_PARK,
  72};
  73
  74static inline struct kthread *to_kthread(struct task_struct *k)
  75{
  76	WARN_ON(!(k->flags & PF_KTHREAD));
  77	return k->worker_private;
  78}
  79
  80/*
  81 * Variant of to_kthread() that doesn't assume @p is a kthread.
  82 *
  83 * Per construction; when:
  84 *
  85 *   (p->flags & PF_KTHREAD) && p->worker_private
  86 *
  87 * the task is both a kthread and struct kthread is persistent. However
   88 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
  89 * begin_new_exec()).
  90 */
  91static inline struct kthread *__to_kthread(struct task_struct *p)
  92{
  93	void *kthread = p->worker_private;
  94	if (kthread && !(p->flags & PF_KTHREAD))
  95		kthread = NULL;
  96	return kthread;
  97}
  98
  99void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 100{
 101	struct kthread *kthread = to_kthread(tsk);
 102
 103	if (!kthread || !kthread->full_name) {
 104		__get_task_comm(buf, buf_size, tsk);
 105		return;
 106	}
 107
 108	strscpy_pad(buf, kthread->full_name, buf_size);
 109}
 110
 111bool set_kthread_struct(struct task_struct *p)
 112{
 113	struct kthread *kthread;
 114
 115	if (WARN_ON_ONCE(to_kthread(p)))
 116		return false;
 117
 118	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
 119	if (!kthread)
 120		return false;
 121
 122	init_completion(&kthread->exited);
 123	init_completion(&kthread->parked);
 124	p->vfork_done = &kthread->exited;
 125
 126	p->worker_private = kthread;
 127	return true;
 128}
 129
 130void free_kthread_struct(struct task_struct *k)
 131{
 132	struct kthread *kthread;
 133
 134	/*
 135	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
 136	 */
 137	kthread = to_kthread(k);
 138	if (!kthread)
 139		return;
 140
 141#ifdef CONFIG_BLK_CGROUP
 142	WARN_ON_ONCE(kthread->blkcg_css);
 143#endif
 144	k->worker_private = NULL;
 145	kfree(kthread->full_name);
 146	kfree(kthread);
 147}
 148
 149/**
 150 * kthread_should_stop - should this kthread return now?
 151 *
 152 * When someone calls kthread_stop() on your kthread, it will be woken
 153 * and this will return true.  You should then return, and your return
 154 * value will be passed through to kthread_stop().
 155 */
 156bool kthread_should_stop(void)
 157{
 158	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 159}
 160EXPORT_SYMBOL(kthread_should_stop);
 161
 162static bool __kthread_should_park(struct task_struct *k)
 163{
 164	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
 165}
 166
 167/**
 168 * kthread_should_park - should this kthread park now?
 169 *
 170 * When someone calls kthread_park() on your kthread, it will be woken
 171 * and this will return true.  You should then do the necessary
 172 * cleanup and call kthread_parkme()
 173 *
 174 * Similar to kthread_should_stop(), but this keeps the thread alive
 175 * and in a park position. kthread_unpark() "restarts" the thread and
 176 * calls the thread function again.
 177 */
 178bool kthread_should_park(void)
 179{
 180	return __kthread_should_park(current);
 181}
 182EXPORT_SYMBOL_GPL(kthread_should_park);
 183
 184bool kthread_should_stop_or_park(void)
 185{
 186	struct kthread *kthread = __to_kthread(current);
 187
 188	if (!kthread)
 189		return false;
 190
 191	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
 192}
 193
 194/**
 195 * kthread_freezable_should_stop - should this freezable kthread return now?
 196 * @was_frozen: optional out parameter, indicates whether %current was frozen
 197 *
 198 * kthread_should_stop() for freezable kthreads, which will enter
 199 * refrigerator if necessary.  This function is safe from kthread_stop() /
 200 * freezer deadlock and freezable kthreads should use this function instead
 201 * of calling try_to_freeze() directly.
 202 */
 203bool kthread_freezable_should_stop(bool *was_frozen)
 204{
 205	bool frozen = false;
 206
 207	might_sleep();
 208
 209	if (unlikely(freezing(current)))
 210		frozen = __refrigerator(true);
 211
 212	if (was_frozen)
 213		*was_frozen = frozen;
 214
 215	return kthread_should_stop();
 216}
 217EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
 218
 219/**
 220 * kthread_func - return the function specified on kthread creation
 221 * @task: kthread task in question
 222 *
 223 * Returns NULL if the task is not a kthread.
 224 */
 225void *kthread_func(struct task_struct *task)
 226{
 227	struct kthread *kthread = __to_kthread(task);
 228	if (kthread)
 229		return kthread->threadfn;
 230	return NULL;
 231}
 232EXPORT_SYMBOL_GPL(kthread_func);
 233
 234/**
 235 * kthread_data - return data value specified on kthread creation
 236 * @task: kthread task in question
 237 *
 238 * Return the data value specified when kthread @task was created.
 239 * The caller is responsible for ensuring the validity of @task when
 240 * calling this function.
 241 */
 242void *kthread_data(struct task_struct *task)
 243{
 244	return to_kthread(task)->data;
 245}
 246EXPORT_SYMBOL_GPL(kthread_data);
 247
 248/**
 249 * kthread_probe_data - speculative version of kthread_data()
 250 * @task: possible kthread task in question
 251 *
 252 * @task could be a kthread task.  Return the data value specified when it
 253 * was created if accessible.  If @task isn't a kthread task or its data is
 254 * inaccessible for any reason, %NULL is returned.  This function requires
 255 * that @task itself is safe to dereference.
 256 */
 257void *kthread_probe_data(struct task_struct *task)
 258{
 259	struct kthread *kthread = __to_kthread(task);
 260	void *data = NULL;
 261
 262	if (kthread)
 263		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 264	return data;
 265}
 266
 267static void __kthread_parkme(struct kthread *self)
 268{
 269	for (;;) {
 270		/*
 271		 * TASK_PARKED is a special state; we must serialize against
 272		 * possible pending wakeups to avoid store-store collisions on
 273		 * task->state.
 274		 *
 275		 * Such a collision might possibly result in the task state
  276		 * changing from TASK_PARKED and us failing the
 277		 * wait_task_inactive() in kthread_park().
 278		 */
 279		set_special_state(TASK_PARKED);
 280		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
 281			break;
 282
 283		/*
 284		 * Thread is going to call schedule(), do not preempt it,
 285		 * or the caller of kthread_park() may spend more time in
 286		 * wait_task_inactive().
 287		 */
 288		preempt_disable();
 289		complete(&self->parked);
 290		schedule_preempt_disabled();
 291		preempt_enable();
 292	}
 293	__set_current_state(TASK_RUNNING);
 294}
 295
 296void kthread_parkme(void)
 297{
 298	__kthread_parkme(to_kthread(current));
 299}
 300EXPORT_SYMBOL_GPL(kthread_parkme);
 301
 302/**
 303 * kthread_exit - Cause the current kthread return @result to kthread_stop().
 304 * @result: The integer value to return to kthread_stop().
 305 *
 306 * While kthread_exit can be called directly, it exists so that
 307 * functions which do some additional work in non-modular code such as
 308 * module_put_and_kthread_exit can be implemented.
 309 *
 310 * Does not return.
 311 */
 312void __noreturn kthread_exit(long result)
 313{
 314	struct kthread *kthread = to_kthread(current);
 315	kthread->result = result;
 316	do_exit(0);
 317}
 318
 319/**
 320 * kthread_complete_and_exit - Exit the current kthread.
 321 * @comp: Completion to complete
 322 * @code: The integer value to return to kthread_stop().
 323 *
 324 * If present, complete @comp and then return code to kthread_stop().
 325 *
 326 * A kernel thread whose module may be removed after the completion of
 327 * @comp can use this function to exit safely.
 328 *
 329 * Does not return.
 330 */
 331void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
 332{
 333	if (comp)
 334		complete(comp);
 335
 336	kthread_exit(code);
 337}
 338EXPORT_SYMBOL(kthread_complete_and_exit);
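/*
 * Illustrative sketch (not part of kthread.c): a module thread that may
 * outlive the code which waits on it can signal a completion and exit in one
 * step via kthread_complete_and_exit(), so no module text runs after the
 * completion fires.  The "example_" identifiers are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/completion.h>

static int example_oneshot_threadfn(void *data)
{
	struct completion *done = data;

	/* ... perform the one-off job ... */

	kthread_complete_and_exit(done, 0);	/* does not return */
}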
 339
 340static int kthread(void *_create)
 341{
 342	static const struct sched_param param = { .sched_priority = 0 };
 343	/* Copy data: it's on kthread's stack */
 344	struct kthread_create_info *create = _create;
 345	int (*threadfn)(void *data) = create->threadfn;
 346	void *data = create->data;
 347	struct completion *done;
 348	struct kthread *self;
 349	int ret;
 350
 351	self = to_kthread(current);
 352
 353	/* Release the structure when caller killed by a fatal signal. */
 354	done = xchg(&create->done, NULL);
 355	if (!done) {
 356		kfree(create->full_name);
 357		kfree(create);
 358		kthread_exit(-EINTR);
 359	}
 360
 361	self->full_name = create->full_name;
 362	self->threadfn = threadfn;
 363	self->data = data;
 364
 365	/*
 366	 * The new thread inherited kthreadd's priority and CPU mask. Reset
 367	 * back to default in case they have been changed.
 368	 */
 369	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
 370	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
 371
 372	/* OK, tell user we're spawned, wait for stop or wakeup */
 373	__set_current_state(TASK_UNINTERRUPTIBLE);
 374	create->result = current;
 375	/*
 376	 * Thread is going to call schedule(), do not preempt it,
 377	 * or the creator may spend more time in wait_task_inactive().
 378	 */
 379	preempt_disable();
 380	complete(done);
 381	schedule_preempt_disabled();
 382	preempt_enable();
 383
 384	ret = -EINTR;
 385	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 386		cgroup_kthread_ready();
 387		__kthread_parkme(self);
 388		ret = threadfn(data);
 389	}
 390	kthread_exit(ret);
 391}
 392
 393/* called from kernel_clone() to get node information for about to be created task */
 394int tsk_fork_get_node(struct task_struct *tsk)
 395{
 396#ifdef CONFIG_NUMA
 397	if (tsk == kthreadd_task)
 398		return tsk->pref_node_fork;
 399#endif
 400	return NUMA_NO_NODE;
 401}
 402
 403static void create_kthread(struct kthread_create_info *create)
 404{
 405	int pid;
 406
 407#ifdef CONFIG_NUMA
 408	current->pref_node_fork = create->node;
 409#endif
 410	/* We want our own signal handler (we take no signals by default). */
 411	pid = kernel_thread(kthread, create, create->full_name,
 412			    CLONE_FS | CLONE_FILES | SIGCHLD);
 413	if (pid < 0) {
 414		/* Release the structure when caller killed by a fatal signal. */
 415		struct completion *done = xchg(&create->done, NULL);
 416
 417		kfree(create->full_name);
 418		if (!done) {
 419			kfree(create);
 420			return;
 421		}
 422		create->result = ERR_PTR(pid);
 423		complete(done);
 424	}
 425}
 426
 427static __printf(4, 0)
 428struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 429						    void *data, int node,
 430						    const char namefmt[],
 431						    va_list args)
 432{
 433	DECLARE_COMPLETION_ONSTACK(done);
 434	struct task_struct *task;
 435	struct kthread_create_info *create = kmalloc(sizeof(*create),
 436						     GFP_KERNEL);
 437
 438	if (!create)
 439		return ERR_PTR(-ENOMEM);
 440	create->threadfn = threadfn;
 441	create->data = data;
 442	create->node = node;
 443	create->done = &done;
 444	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
 445	if (!create->full_name) {
 446		task = ERR_PTR(-ENOMEM);
 447		goto free_create;
 448	}
 449
 450	spin_lock(&kthread_create_lock);
 451	list_add_tail(&create->list, &kthread_create_list);
 452	spin_unlock(&kthread_create_lock);
 453
 454	wake_up_process(kthreadd_task);
 455	/*
 456	 * Wait for completion in killable state, for I might be chosen by
 457	 * the OOM killer while kthreadd is trying to allocate memory for
 458	 * new kernel thread.
 459	 */
 460	if (unlikely(wait_for_completion_killable(&done))) {
 461		/*
 462		 * If I was killed by a fatal signal before kthreadd (or new
 463		 * kernel thread) calls complete(), leave the cleanup of this
 464		 * structure to that thread.
 465		 */
 466		if (xchg(&create->done, NULL))
 467			return ERR_PTR(-EINTR);
 468		/*
 469		 * kthreadd (or new kernel thread) will call complete()
 470		 * shortly.
 471		 */
 472		wait_for_completion(&done);
 473	}
 474	task = create->result;
 475free_create:
 476	kfree(create);
 477	return task;
 478}
 479
 480/**
 481 * kthread_create_on_node - create a kthread.
 482 * @threadfn: the function to run until signal_pending(current).
 483 * @data: data ptr for @threadfn.
 484 * @node: task and thread structures for the thread are allocated on this node
 485 * @namefmt: printf-style name for the thread.
 486 *
 487 * Description: This helper function creates and names a kernel
 488 * thread.  The thread will be stopped: use wake_up_process() to start
 489 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 490 * is affine to all CPUs.
 491 *
 492 * If thread is going to be bound on a particular cpu, give its node
 493 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 494 * When woken, the thread will run @threadfn() with @data as its
 495 * argument. @threadfn() can either return directly if it is a
 496 * standalone thread for which no one will call kthread_stop(), or
 497 * return when 'kthread_should_stop()' is true (which means
 498 * kthread_stop() has been called).  The return value should be zero
 499 * or a negative error number; it will be passed to kthread_stop().
 500 *
 501 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 502 */
 503struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 504					   void *data, int node,
 505					   const char namefmt[],
 506					   ...)
 507{
 508	struct task_struct *task;
 509	va_list args;
 510
 511	va_start(args, namefmt);
 512	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
 513	va_end(args);
 514
 515	return task;
 516}
 517EXPORT_SYMBOL(kthread_create_on_node);
 518
 519static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
 520{
 521	unsigned long flags;
 522
 523	if (!wait_task_inactive(p, state)) {
 524		WARN_ON(1);
 525		return;
 526	}
 527
 528	/* It's safe because the task is inactive. */
 529	raw_spin_lock_irqsave(&p->pi_lock, flags);
 530	do_set_cpus_allowed(p, mask);
 531	p->flags |= PF_NO_SETAFFINITY;
 532	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 533}
 534
 535static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
 536{
 537	__kthread_bind_mask(p, cpumask_of(cpu), state);
 538}
 539
 540void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 541{
 542	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 543}
 544
 545/**
 546 * kthread_bind - bind a just-created kthread to a cpu.
 547 * @p: thread created by kthread_create().
 548 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 549 *
 550 * Description: This function is equivalent to set_cpus_allowed(),
 551 * except that @cpu doesn't need to be online, and the thread must be
 552 * stopped (i.e., just returned from kthread_create()).
 553 */
 554void kthread_bind(struct task_struct *p, unsigned int cpu)
 555{
 556	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 557}
 558EXPORT_SYMBOL(kthread_bind);
 559
 560/**
 561 * kthread_create_on_cpu - Create a cpu bound kthread
 562 * @threadfn: the function to run until signal_pending(current).
 563 * @data: data ptr for @threadfn.
 564 * @cpu: The cpu on which the thread should be bound.
 565 * @namefmt: printf-style name for the thread. Format is restricted
 566 *	     to "name.*%u". Code fills in cpu number.
 567 *
 568 * Description: This helper function creates and names a kernel thread.
 569 */
 570struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 571					  void *data, unsigned int cpu,
 572					  const char *namefmt)
 573{
 574	struct task_struct *p;
 575
 576	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
 577				   cpu);
 578	if (IS_ERR(p))
 579		return p;
 580	kthread_bind(p, cpu);
 581	/* CPU hotplug needs to bind once again when unparking the thread. */
 582	to_kthread(p)->cpu = cpu;
 583	return p;
 584}
 585EXPORT_SYMBOL(kthread_create_on_cpu);
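/*
 * Illustrative sketch (editor's addition): creating a CPU-bound kthread.
 * The name format must contain a single %u that the helper fills in with
 * the CPU number; my_percpu_fn and "my_percpu/%u" are made-up names.
 */
static int my_percpu_fn(void *data)
{
	while (!kthread_should_stop())
		msleep_interruptible(100);	/* per-CPU work goes here */
	return 0;
}

static struct task_struct *my_start_on_cpu(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create_on_cpu(my_percpu_fn, NULL, cpu, "my_percpu/%u");
	if (!IS_ERR(p))
		wake_up_process(p);	/* already bound to @cpu */
	return p;
}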
 586
 587void kthread_set_per_cpu(struct task_struct *k, int cpu)
 588{
 589	struct kthread *kthread = to_kthread(k);
 590	if (!kthread)
 591		return;
 592
 593	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
 594
 595	if (cpu < 0) {
 596		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 597		return;
 598	}
 599
 600	kthread->cpu = cpu;
 601	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 602}
 603
 604bool kthread_is_per_cpu(struct task_struct *p)
 605{
 606	struct kthread *kthread = __to_kthread(p);
 607	if (!kthread)
 608		return false;
 609
 610	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 611}
 612
 613/**
 614 * kthread_unpark - unpark a thread created by kthread_create().
 615 * @k:		thread created by kthread_create().
 616 *
 617 * Sets kthread_should_park() for @k to return false, wakes it, and
 618 * waits for it to return. If the thread is marked percpu then it is
 619 * bound to the cpu again.
 620 */
 621void kthread_unpark(struct task_struct *k)
 622{
 623	struct kthread *kthread = to_kthread(k);
 624
 625	/*
 626	 * Newly created kthread was parked when the CPU was offline.
 627	 * The binding was lost and we need to set it again.
 628	 */
 629	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 630		__kthread_bind(k, kthread->cpu, TASK_PARKED);
 631
 632	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 633	/*
 634	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 635	 */
 636	wake_up_state(k, TASK_PARKED);
 637}
 638EXPORT_SYMBOL_GPL(kthread_unpark);
 639
 640/**
 641 * kthread_park - park a thread created by kthread_create().
 642 * @k: thread created by kthread_create().
 643 *
 644 * Sets kthread_should_park() for @k to return true, wakes it, and
 645 * waits for it to return. This can also be called after kthread_create()
 646 * instead of calling wake_up_process(): the thread will park without
 647 * calling threadfn().
 648 *
 649 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 650 * If called by the kthread itself just the park bit is set.
 651 */
 652int kthread_park(struct task_struct *k)
 653{
 654	struct kthread *kthread = to_kthread(k);
 655
 656	if (WARN_ON(k->flags & PF_EXITING))
 657		return -ENOSYS;
 658
 659	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
 660		return -EBUSY;
 661
 662	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 663	if (k != current) {
 664		wake_up_process(k);
 665		/*
 666		 * Wait for __kthread_parkme() to complete(), this means we
 667		 * _will_ have TASK_PARKED and are about to call schedule().
 668		 */
 669		wait_for_completion(&kthread->parked);
 670		/*
 671		 * Now wait for that schedule() to complete and the task to
 672		 * get scheduled out.
 673		 */
 674		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
 675	}
 676
 677	return 0;
 678}
 679EXPORT_SYMBOL_GPL(kthread_park);
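/*
 * Illustrative sketch (editor's addition) of the park protocol: the thread
 * function polls kthread_should_park() and parks itself with
 * kthread_parkme(); the controlling side uses kthread_park()/unpark().
 * my_parkable_fn and my_pause_resume are made-up names.
 */
static int my_parkable_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps in TASK_PARKED */
		/* per-CPU or otherwise pausable work goes here */
		msleep_interruptible(100);
	}
	return 0;
}

static void my_pause_resume(struct task_struct *p)
{
	kthread_park(p);	/* returns once the thread is parked */
	/* ... e.g. the bound CPU may go offline and come back here ... */
	kthread_unpark(p);	/* rebinds a per-cpu thread and wakes it */
}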
 680
 681/**
 682 * kthread_stop - stop a thread created by kthread_create().
 683 * @k: thread created by kthread_create().
 684 *
 685 * Sets kthread_should_stop() for @k to return true, wakes it, and
 686 * waits for it to exit. This can also be called after kthread_create()
 687 * instead of calling wake_up_process(): the thread will exit without
 688 * calling threadfn().
 689 *
 690 * If threadfn() may call kthread_exit() itself, the caller must ensure
 691 * task_struct can't go away.
 692 *
 693 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 694 * was never called.
 695 */
 696int kthread_stop(struct task_struct *k)
 697{
 698	struct kthread *kthread;
 699	int ret;
 700
 701	trace_sched_kthread_stop(k);
 702
 703	get_task_struct(k);
 704	kthread = to_kthread(k);
 705	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 706	kthread_unpark(k);
 707	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
 708	wake_up_process(k);
 709	wait_for_completion(&kthread->exited);
 710	ret = kthread->result;
 711	put_task_struct(k);
 712
 713	trace_sched_kthread_stop_ret(ret);
 714	return ret;
 715}
 716EXPORT_SYMBOL(kthread_stop);
 717
 718/**
 719 * kthread_stop_put - stop a thread and put its task struct
 720 * @k: thread created by kthread_create().
 721 *
 722 * Stops a thread created by kthread_create() and puts its task_struct.
 723 * Only use when holding an extra task struct reference obtained by
 724 * calling get_task_struct().
 725 */
 726int kthread_stop_put(struct task_struct *k)
 727{
 728	int ret;
 729
 730	ret = kthread_stop(k);
 731	put_task_struct(k);
 732	return ret;
 733}
 734EXPORT_SYMBOL(kthread_stop_put);
 735
 736int kthreadd(void *unused)
 737{
 738	struct task_struct *tsk = current;
 739
 740	/* Setup a clean context for our children to inherit. */
 741	set_task_comm(tsk, "kthreadd");
 742	ignore_signals(tsk);
 743	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
 744	set_mems_allowed(node_states[N_MEMORY]);
 745
 746	current->flags |= PF_NOFREEZE;
 747	cgroup_init_kthreadd();
 748
 749	for (;;) {
 750		set_current_state(TASK_INTERRUPTIBLE);
 751		if (list_empty(&kthread_create_list))
 752			schedule();
 753		__set_current_state(TASK_RUNNING);
 754
 755		spin_lock(&kthread_create_lock);
 756		while (!list_empty(&kthread_create_list)) {
 757			struct kthread_create_info *create;
 758
 759			create = list_entry(kthread_create_list.next,
 760					    struct kthread_create_info, list);
 761			list_del_init(&create->list);
 762			spin_unlock(&kthread_create_lock);
 763
 764			create_kthread(create);
 765
 766			spin_lock(&kthread_create_lock);
 767		}
 768		spin_unlock(&kthread_create_lock);
 769	}
 770
 771	return 0;
 772}
 773
 774void __kthread_init_worker(struct kthread_worker *worker,
 775				const char *name,
 776				struct lock_class_key *key)
 777{
 778	memset(worker, 0, sizeof(struct kthread_worker));
 779	raw_spin_lock_init(&worker->lock);
 780	lockdep_set_class_and_name(&worker->lock, key, name);
 781	INIT_LIST_HEAD(&worker->work_list);
 782	INIT_LIST_HEAD(&worker->delayed_work_list);
 783}
 784EXPORT_SYMBOL_GPL(__kthread_init_worker);
 785
 786/**
 787 * kthread_worker_fn - kthread function to process kthread_worker
 788 * @worker_ptr: pointer to initialized kthread_worker
 789 *
 790 * This function implements the main loop of the kthread worker. It processes
 791 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 792 * is empty.
 793 *
 794 * The works must not hold any locks or leave preemption or interrupts disabled
 795 * when they finish. A safe point for freezing is defined after one work
 796 * finishes and before a new one is started.
 797 *
 798 * Also, a work must not be handled by more than one worker at the same time;
 799 * see also kthread_queue_work().
 800 */
 801int kthread_worker_fn(void *worker_ptr)
 802{
 803	struct kthread_worker *worker = worker_ptr;
 804	struct kthread_work *work;
 805
 806	/*
 807	 * FIXME: Update the check and remove the assignment when all kthread
 808	 * worker users are created using kthread_create_worker*() functions.
 809	 */
 810	WARN_ON(worker->task && worker->task != current);
 811	worker->task = current;
 812
 813	if (worker->flags & KTW_FREEZABLE)
 814		set_freezable();
 815
 816repeat:
 817	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 818
 819	if (kthread_should_stop()) {
 820		__set_current_state(TASK_RUNNING);
 821		raw_spin_lock_irq(&worker->lock);
 822		worker->task = NULL;
 823		raw_spin_unlock_irq(&worker->lock);
 824		return 0;
 825	}
 826
 827	work = NULL;
 828	raw_spin_lock_irq(&worker->lock);
 829	if (!list_empty(&worker->work_list)) {
 830		work = list_first_entry(&worker->work_list,
 831					struct kthread_work, node);
 832		list_del_init(&work->node);
 833	}
 834	worker->current_work = work;
 835	raw_spin_unlock_irq(&worker->lock);
 836
 837	if (work) {
 838		kthread_work_func_t func = work->func;
 839		__set_current_state(TASK_RUNNING);
 840		trace_sched_kthread_work_execute_start(work);
 841		work->func(work);
 842		/*
 843		 * Avoid dereferencing work after this point.  The trace
 844		 * event only cares about the address.
 845		 */
 846		trace_sched_kthread_work_execute_end(work, func);
 847	} else if (!freezing(current))
 848		schedule();
 849
 850	try_to_freeze();
 851	cond_resched();
 852	goto repeat;
 853}
 854EXPORT_SYMBOL_GPL(kthread_worker_fn);
 855
 856static __printf(3, 0) struct kthread_worker *
 857__kthread_create_worker(int cpu, unsigned int flags,
 858			const char namefmt[], va_list args)
 859{
 860	struct kthread_worker *worker;
 861	struct task_struct *task;
 862	int node = NUMA_NO_NODE;
 863
 864	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 865	if (!worker)
 866		return ERR_PTR(-ENOMEM);
 867
 868	kthread_init_worker(worker);
 869
 870	if (cpu >= 0)
 871		node = cpu_to_node(cpu);
 872
 873	task = __kthread_create_on_node(kthread_worker_fn, worker,
 874						node, namefmt, args);
 875	if (IS_ERR(task))
 876		goto fail_task;
 877
 878	if (cpu >= 0)
 879		kthread_bind(task, cpu);
 880
 881	worker->flags = flags;
 882	worker->task = task;
 883	wake_up_process(task);
 884	return worker;
 885
 886fail_task:
 887	kfree(worker);
 888	return ERR_CAST(task);
 889}
 890
 891/**
 892 * kthread_create_worker - create a kthread worker
 893 * @flags: flags modifying the default behavior of the worker
 894 * @namefmt: printf-style name for the kthread worker (task).
 895 *
 896 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 897 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 898 * when the caller was killed by a fatal signal.
 899 */
 900struct kthread_worker *
 901kthread_create_worker(unsigned int flags, const char namefmt[], ...)
 902{
 903	struct kthread_worker *worker;
 904	va_list args;
 905
 906	va_start(args, namefmt);
 907	worker = __kthread_create_worker(-1, flags, namefmt, args);
 908	va_end(args);
 909
 910	return worker;
 911}
 912EXPORT_SYMBOL(kthread_create_worker);
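/*
 * Illustrative sketch (editor's addition) of the worker API as a whole:
 * create a worker, queue a work item, wait for it, and tear the worker
 * down.  my_work_fn and my_use_worker are made-up names.
 */
static void my_work_fn(struct kthread_work *work)
{
	/* runs in the worker thread, one work item at a time */
}

static int my_use_worker(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, my_work_fn);
	kthread_queue_work(worker, &work);	/* %false if already pending */
	kthread_flush_work(&work);		/* wait for my_work_fn() */

	kthread_destroy_worker(worker);
	return 0;
}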
 913
 914/**
 915 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 916 *	to a given CPU and the associated NUMA node.
 917 * @cpu: CPU number
 918 * @flags: flags modifying the default behavior of the worker
 919 * @namefmt: printf-style name for the kthread worker (task).
 920 *
 921 * Use a valid CPU number if you want to bind the kthread worker
 922 * to the given CPU and the associated NUMA node.
 923 *
 924 * A good practice is to also include the cpu number in the worker name.
 925 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 926 *
 927 * CPU hotplug:
 928 * The kthread worker API is simple and generic. It just provides a way
 929 * to create, use, and destroy workers.
 930 *
 931 * It is up to the API user how to handle CPU hotplug. They have to decide
 932 * how to handle pending work items, prevent queuing new ones, and
 933 * restore the functionality when the CPU goes off and on. There are a
 934 * few catches:
 935 *
 936 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 937 *
 938 *    - The worker might not exist if the CPU was offline when the user
 939 *      created the workers.
 940 *
 941 * Good practice is to implement two CPU hotplug callbacks and to
 942 * destroy/create the worker when the CPU goes down/up.
 943 *
 944 * Return:
 945 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 946 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 947 * when the caller was killed by a fatal signal.
 948 */
 949struct kthread_worker *
 950kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 951			     const char namefmt[], ...)
 952{
 953	struct kthread_worker *worker;
 954	va_list args;
 955
 956	va_start(args, namefmt);
 957	worker = __kthread_create_worker(cpu, flags, namefmt, args);
 958	va_end(args);
 959
 960	return worker;
 961}
 962EXPORT_SYMBOL(kthread_create_worker_on_cpu);
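/*
 * Illustrative sketch (editor's addition) of the "two CPU hotplug callbacks"
 * practice described above: create the bound worker when a CPU comes online
 * and destroy it when the CPU goes down.  The per-cpu variable, the callbacks
 * and the cpuhp state name are made up; error handling is minimal.
 */
static DEFINE_PER_CPU(struct kthread_worker *, my_workers);

static int my_cpu_online(unsigned int cpu)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_cpu(cpu, 0, "my_helper/%u", cpu);
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	per_cpu(my_workers, cpu) = worker;
	return 0;
}

static int my_cpu_down(unsigned int cpu)
{
	kthread_destroy_worker(per_cpu(my_workers, cpu));
	per_cpu(my_workers, cpu) = NULL;
	return 0;
}

static int my_register_hotplug(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my_helper:online",
				 my_cpu_online, my_cpu_down);
}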
 963
 964/*
 965 * Returns true when the work could not be queued at the moment.
 966 * It happens when it is already pending in a worker list
 967 * or when it is being cancelled.
 968 */
 969static inline bool queuing_blocked(struct kthread_worker *worker,
 970				   struct kthread_work *work)
 971{
 972	lockdep_assert_held(&worker->lock);
 973
 974	return !list_empty(&work->node) || work->canceling;
 975}
 976
 977static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 978					     struct kthread_work *work)
 979{
 980	lockdep_assert_held(&worker->lock);
 981	WARN_ON_ONCE(!list_empty(&work->node));
 982	/* Do not use a work with >1 worker, see kthread_queue_work() */
 983	WARN_ON_ONCE(work->worker && work->worker != worker);
 984}
 985
 986/* insert @work before @pos in @worker */
 987static void kthread_insert_work(struct kthread_worker *worker,
 988				struct kthread_work *work,
 989				struct list_head *pos)
 990{
 991	kthread_insert_work_sanity_check(worker, work);
 992
 993	trace_sched_kthread_work_queue_work(worker, work);
 994
 995	list_add_tail(&work->node, pos);
 996	work->worker = worker;
 997	if (!worker->current_work && likely(worker->task))
 998		wake_up_process(worker->task);
 999}
1000
1001/**
1002 * kthread_queue_work - queue a kthread_work
1003 * @worker: target kthread_worker
1004 * @work: kthread_work to queue
1005 *
1006 * Queue @work to the work processor @worker for async execution.  @worker
1007 * must have been created with kthread_create_worker().  Returns %true
1008 * if @work was successfully queued, %false if it was already pending.
1009 *
1010 * Reinitialize the work if it needs to be used by another worker.
1011 * For example, when the worker was stopped and started again.
1012 */
1013bool kthread_queue_work(struct kthread_worker *worker,
1014			struct kthread_work *work)
1015{
1016	bool ret = false;
1017	unsigned long flags;
1018
1019	raw_spin_lock_irqsave(&worker->lock, flags);
1020	if (!queuing_blocked(worker, work)) {
1021		kthread_insert_work(worker, work, &worker->work_list);
1022		ret = true;
1023	}
1024	raw_spin_unlock_irqrestore(&worker->lock, flags);
1025	return ret;
1026}
1027EXPORT_SYMBOL_GPL(kthread_queue_work);
1028
1029/**
1030 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1031 *	delayed work when the timer expires.
1032 * @t: pointer to the expired timer
1033 *
1034 * The prototype of the function is defined by struct timer_list.
1035 * It is expected to be called from an irq-safe timer with interrupts already disabled.
1036 */
1037void kthread_delayed_work_timer_fn(struct timer_list *t)
1038{
1039	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1040	struct kthread_work *work = &dwork->work;
1041	struct kthread_worker *worker = work->worker;
1042	unsigned long flags;
1043
1044	/*
1045	 * This might happen when a pending work is reinitialized.
1046	 * It means that the work is being used in a wrong way.
1047	 */
1048	if (WARN_ON_ONCE(!worker))
1049		return;
1050
1051	raw_spin_lock_irqsave(&worker->lock, flags);
1052	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1053	WARN_ON_ONCE(work->worker != worker);
1054
1055	/* Move the work from worker->delayed_work_list. */
1056	WARN_ON_ONCE(list_empty(&work->node));
1057	list_del_init(&work->node);
1058	if (!work->canceling)
1059		kthread_insert_work(worker, work, &worker->work_list);
1060
1061	raw_spin_unlock_irqrestore(&worker->lock, flags);
1062}
1063EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1064
1065static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1066					 struct kthread_delayed_work *dwork,
1067					 unsigned long delay)
1068{
1069	struct timer_list *timer = &dwork->timer;
1070	struct kthread_work *work = &dwork->work;
1071
1072	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1073
1074	/*
1075	 * If @delay is 0, queue @dwork->work immediately.  This is for
1076	 * both optimization and correctness.  The earliest @timer can
1077	 * expire is on the closest next tick and delayed_work users depend
1078	 * on that there's no such delay when @delay is 0.
1079	 */
1080	if (!delay) {
1081		kthread_insert_work(worker, work, &worker->work_list);
1082		return;
1083	}
1084
1085	/* Be paranoid and try to detect possible races already now. */
1086	kthread_insert_work_sanity_check(worker, work);
1087
1088	list_add(&work->node, &worker->delayed_work_list);
1089	work->worker = worker;
1090	timer->expires = jiffies + delay;
1091	add_timer(timer);
1092}
1093
1094/**
1095 * kthread_queue_delayed_work - queue the associated kthread work
1096 *	after a delay.
1097 * @worker: target kthread_worker
1098 * @dwork: kthread_delayed_work to queue
1099 * @delay: number of jiffies to wait before queuing
1100 *
1101 * If the work is not already pending, this starts a timer that will queue
1102 * the work after the given @delay. If @delay is zero, it queues the
1103 * work immediately.
1104 *
1105 * Return: %false if the @work was already pending, meaning that
1106 * either the timer was running or the work was queued. Returns %true
1107 * otherwise.
1108 */
1109bool kthread_queue_delayed_work(struct kthread_worker *worker,
1110				struct kthread_delayed_work *dwork,
1111				unsigned long delay)
1112{
1113	struct kthread_work *work = &dwork->work;
1114	unsigned long flags;
1115	bool ret = false;
1116
1117	raw_spin_lock_irqsave(&worker->lock, flags);
1118
1119	if (!queuing_blocked(worker, work)) {
1120		__kthread_queue_delayed_work(worker, dwork, delay);
1121		ret = true;
1122	}
1123
1124	raw_spin_unlock_irqrestore(&worker->lock, flags);
1125	return ret;
1126}
1127EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
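/*
 * Illustrative sketch (editor's addition): arm a delayed work item.  The
 * worker is assumed to exist already; my_dwork, my_timeout_fn and the
 * 100ms delay are made up.
 */
static struct kthread_delayed_work my_dwork;

static void my_timeout_fn(struct kthread_work *work)
{
	/* runs in the worker thread roughly 100ms after queuing */
}

static void my_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&my_dwork, my_timeout_fn);
	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100));
}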
1128
1129struct kthread_flush_work {
1130	struct kthread_work	work;
1131	struct completion	done;
1132};
1133
1134static void kthread_flush_work_fn(struct kthread_work *work)
1135{
1136	struct kthread_flush_work *fwork =
1137		container_of(work, struct kthread_flush_work, work);
1138	complete(&fwork->done);
1139}
1140
1141/**
1142 * kthread_flush_work - flush a kthread_work
1143 * @work: work to flush
1144 *
1145 * If @work is queued or executing, wait for it to finish execution.
1146 */
1147void kthread_flush_work(struct kthread_work *work)
1148{
1149	struct kthread_flush_work fwork = {
1150		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1151		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1152	};
1153	struct kthread_worker *worker;
1154	bool noop = false;
1155
1156	worker = work->worker;
1157	if (!worker)
1158		return;
1159
1160	raw_spin_lock_irq(&worker->lock);
1161	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1162	WARN_ON_ONCE(work->worker != worker);
1163
1164	if (!list_empty(&work->node))
1165		kthread_insert_work(worker, &fwork.work, work->node.next);
1166	else if (worker->current_work == work)
1167		kthread_insert_work(worker, &fwork.work,
1168				    worker->work_list.next);
1169	else
1170		noop = true;
1171
1172	raw_spin_unlock_irq(&worker->lock);
1173
1174	if (!noop)
1175		wait_for_completion(&fwork.done);
1176}
1177EXPORT_SYMBOL_GPL(kthread_flush_work);
1178
1179/*
1180 * Make sure that the timer is neither set nor running and could
1181 * not manipulate the work list_head any longer.
1182 *
1183 * The function is called under worker->lock. The lock is temporarily
1184 * released but the timer can't be set again in the meantime.
1185 */
1186static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1187					      unsigned long *flags)
1188{
1189	struct kthread_delayed_work *dwork =
1190		container_of(work, struct kthread_delayed_work, work);
1191	struct kthread_worker *worker = work->worker;
1192
1193	/*
1194	 * del_timer_sync() must be called to make sure that the timer
1195	 * callback is not running. The lock must be temporarily released
1196	 * to avoid a deadlock with the callback. In the meantime,
1197	 * any queuing is blocked by setting the canceling counter.
1198	 */
1199	work->canceling++;
1200	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1201	del_timer_sync(&dwork->timer);
1202	raw_spin_lock_irqsave(&worker->lock, *flags);
1203	work->canceling--;
1204}
1205
1206/*
1207 * This function removes the work from the worker queue.
1208 *
1209 * It is called under worker->lock. The caller must make sure that
1210 * the timer used by delayed work is not running, e.g. by calling
1211 * kthread_cancel_delayed_work_timer().
1212 *
1213 * The work might still be in use when this function finishes. See the
1214 * current_work processed by the worker.
1215 *
1216 * Return: %true if @work was pending and successfully canceled,
1217 *	%false if @work was not pending
1218 */
1219static bool __kthread_cancel_work(struct kthread_work *work)
1220{
1221	/*
1222	 * Try to remove the work from a worker list. It might either
1223	 * be from worker->work_list or from worker->delayed_work_list.
1224	 */
1225	if (!list_empty(&work->node)) {
1226		list_del_init(&work->node);
1227		return true;
1228	}
1229
1230	return false;
1231}
1232
1233/**
1234 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1235 * @worker: kthread worker to use
1236 * @dwork: kthread delayed work to queue
1237 * @delay: number of jiffies to wait before queuing
1238 *
1239 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1240 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1241 * @work is guaranteed to be queued immediately.
1242 *
1243 * Return: %false if @dwork was idle and queued, %true otherwise.
1244 *
1245 * A special case is when the work is being canceled in parallel.
1246 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1247 * or yet another kthread_mod_delayed_work() call. We let the other command
1248 * win and return %true here. The return value can be used for reference
1249 * counting and the number of queued works stays the same. Anyway, the caller
1250 * is supposed to synchronize these operations in a reasonable way.
1251 *
1252 * This function is safe to call from any context including IRQ handler.
1253 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1254 * for details.
1255 */
1256bool kthread_mod_delayed_work(struct kthread_worker *worker,
1257			      struct kthread_delayed_work *dwork,
1258			      unsigned long delay)
1259{
1260	struct kthread_work *work = &dwork->work;
1261	unsigned long flags;
1262	int ret;
1263
1264	raw_spin_lock_irqsave(&worker->lock, flags);
1265
1266	/* Do not bother with canceling when never queued. */
1267	if (!work->worker) {
1268		ret = false;
1269		goto fast_queue;
1270	}
1271
1272	/* Work must not be used with >1 worker, see kthread_queue_work() */
1273	WARN_ON_ONCE(work->worker != worker);
1274
1275	/*
1276	 * Temporarily cancel the work but do not fight with another command
1277	 * that is canceling the work as well.
1278	 *
1279	 * It is a bit tricky because of possible races with another
1280	 * mod_delayed_work() and cancel_delayed_work() callers.
1281	 *
1282	 * The timer must be canceled first because worker->lock is released
1283	 * when doing so. But the work can be removed from the queue (list)
1284	 * only when it can be queued again so that the return value can
1285	 * be used for reference counting.
1286	 */
1287	kthread_cancel_delayed_work_timer(work, &flags);
1288	if (work->canceling) {
1289		/* The number of works in the queue does not change. */
1290		ret = true;
1291		goto out;
1292	}
1293	ret = __kthread_cancel_work(work);
1294
1295fast_queue:
1296	__kthread_queue_delayed_work(worker, dwork, delay);
1297out:
1298	raw_spin_unlock_irqrestore(&worker->lock, flags);
1299	return ret;
1300}
1301EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
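/*
 * Illustrative sketch (editor's addition): kthread_mod_delayed_work() as a
 * re-arm/debounce helper.  Every call pushes the expiry another 100ms into
 * the future; the names and the delay are made up, and the dwork is assumed
 * to be initialized with kthread_init_delayed_work().
 */
static void my_touch(struct kthread_worker *worker,
		     struct kthread_delayed_work *dwork)
{
	/* Safe from any context, including hard interrupt handlers. */
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(100));
}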
1302
1303static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1304{
1305	struct kthread_worker *worker = work->worker;
1306	unsigned long flags;
1307	int ret = false;
1308
1309	if (!worker)
1310		goto out;
1311
1312	raw_spin_lock_irqsave(&worker->lock, flags);
1313	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1314	WARN_ON_ONCE(work->worker != worker);
1315
1316	if (is_dwork)
1317		kthread_cancel_delayed_work_timer(work, &flags);
1318
1319	ret = __kthread_cancel_work(work);
1320
1321	if (worker->current_work != work)
1322		goto out_fast;
1323
1324	/*
1325	 * The work is in progress and we need to wait with the lock released.
1326	 * In the meantime, block any queuing by setting the canceling counter.
1327	 */
1328	work->canceling++;
1329	raw_spin_unlock_irqrestore(&worker->lock, flags);
1330	kthread_flush_work(work);
1331	raw_spin_lock_irqsave(&worker->lock, flags);
1332	work->canceling--;
1333
1334out_fast:
1335	raw_spin_unlock_irqrestore(&worker->lock, flags);
1336out:
1337	return ret;
1338}
1339
1340/**
1341 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1342 * @work: the kthread work to cancel
1343 *
1344 * Cancel @work and wait for its execution to finish.  This function
1345 * can be used even if the work re-queues itself. On return from this
1346 * function, @work is guaranteed to be not pending or executing on any CPU.
1347 *
1348 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1349 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1350 *
1351 * The caller must ensure that the worker on which @work was last
1352 * queued can't be destroyed before this function returns.
1353 *
1354 * Return: %true if @work was pending, %false otherwise.
1355 */
1356bool kthread_cancel_work_sync(struct kthread_work *work)
1357{
1358	return __kthread_cancel_work_sync(work, false);
1359}
1360EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1361
1362/**
1363 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1364 *	wait for it to finish.
1365 * @dwork: the kthread delayed work to cancel
1366 *
1367 * This is kthread_cancel_work_sync() for delayed works.
1368 *
1369 * Return: %true if @dwork was pending, %false otherwise.
1370 */
1371bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1372{
1373	return __kthread_cancel_work_sync(&dwork->work, true);
1374}
1375EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
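/*
 * Illustrative sketch (editor's addition) of teardown ordering: cancel (and
 * wait for) pending items before destroying the worker itself.  The names
 * are made up and no new queuing is assumed to race with this path.
 */
static void my_teardown(struct kthread_worker *worker,
			struct kthread_work *work,
			struct kthread_delayed_work *dwork)
{
	kthread_cancel_work_sync(work);
	kthread_cancel_delayed_work_sync(dwork);
	kthread_destroy_worker(worker);
}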
1376
1377/**
1378 * kthread_flush_worker - flush all current works on a kthread_worker
1379 * @worker: worker to flush
1380 *
1381 * Wait until all currently executing or pending works on @worker are
1382 * finished.
1383 */
1384void kthread_flush_worker(struct kthread_worker *worker)
1385{
1386	struct kthread_flush_work fwork = {
1387		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1388		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1389	};
1390
1391	kthread_queue_work(worker, &fwork.work);
1392	wait_for_completion(&fwork.done);
1393}
1394EXPORT_SYMBOL_GPL(kthread_flush_worker);
1395
1396/**
1397 * kthread_destroy_worker - destroy a kthread worker
1398 * @worker: worker to be destroyed
1399 *
1400 * Flush and destroy @worker.  The simple flush is enough because the kthread
1401 * worker API is used only in trivial scenarios.  There are no multi-step state
1402 * machines needed.
1403 *
1404 * Note that this function is not responsible for handling delayed work, so
1405 * the caller is responsible for queuing or canceling all delayed work items
1406 * before invoking this function.
1407 */
1408void kthread_destroy_worker(struct kthread_worker *worker)
1409{
1410	struct task_struct *task;
1411
1412	task = worker->task;
1413	if (WARN_ON(!task))
1414		return;
1415
1416	kthread_flush_worker(worker);
1417	kthread_stop(task);
1418	WARN_ON(!list_empty(&worker->delayed_work_list));
1419	WARN_ON(!list_empty(&worker->work_list));
1420	kfree(worker);
1421}
1422EXPORT_SYMBOL(kthread_destroy_worker);
1423
1424/**
1425 * kthread_use_mm - make the calling kthread operate on an address space
1426 * @mm: address space to operate on
1427 */
1428void kthread_use_mm(struct mm_struct *mm)
1429{
1430	struct mm_struct *active_mm;
1431	struct task_struct *tsk = current;
1432
1433	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1434	WARN_ON_ONCE(tsk->mm);
1435
1436	/*
1437	 * It is possible for mm to be the same as tsk->active_mm, but
1438	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1439	 * because these references are not equivalent.
1440	 */
1441	mmgrab(mm);
1442
1443	task_lock(tsk);
1444	/* Hold off tlb flush IPIs while switching mm's */
1445	local_irq_disable();
1446	active_mm = tsk->active_mm;
1447	tsk->active_mm = mm;
1448	tsk->mm = mm;
1449	membarrier_update_current_mm(mm);
1450	switch_mm_irqs_off(active_mm, mm, tsk);
1451	local_irq_enable();
1452	task_unlock(tsk);
1453#ifdef finish_arch_post_lock_switch
1454	finish_arch_post_lock_switch();
1455#endif
1456
1457	/*
1458	 * When a kthread starts operating on an address space, the loop
1459	 * in membarrier_{private,global}_expedited() may not yet observe
1460	 * tsk->mm, and thus not issue an IPI. Membarrier requires a
1461	 * memory barrier after storing to tsk->mm, before accessing
1462	 * user-space memory. A full memory barrier for membarrier
1463	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1464	 * mmdrop_lazy_tlb().
1465	 */
1466	mmdrop_lazy_tlb(active_mm);
1467}
1468EXPORT_SYMBOL_GPL(kthread_use_mm);
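/*
 * Illustrative sketch (editor's addition): a kthread temporarily adopting a
 * user address space so that copy_from_user()/copy_to_user() operate on it.
 * The mm is assumed to have been handed over with a reference held (e.g. via
 * mmget_not_zero()) by the task that owns it; the helper name is made up.
 */
static int my_copy_from_user_mm(struct mm_struct *mm, void *dst,
				const void __user *src, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);		/* current now operates on @mm */
	if (copy_from_user(dst, src, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);		/* back to a plain kernel thread */
	return ret;
}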
1469
1470/**
1471 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1472 * @mm: address space to operate on
1473 */
1474void kthread_unuse_mm(struct mm_struct *mm)
1475{
1476	struct task_struct *tsk = current;
1477
1478	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1479	WARN_ON_ONCE(!tsk->mm);
1480
1481	task_lock(tsk);
1482	/*
1483	 * When a kthread stops operating on an address space, the loop
1484	 * in membarrier_{private,global}_expedited() may no longer observe
1485	 * tsk->mm, and thus not issue an IPI. Membarrier requires a
1486	 * memory barrier after accessing user-space memory, before
1487	 * clearing tsk->mm.
1488	 */
1489	smp_mb__after_spinlock();
1490	local_irq_disable();
1491	tsk->mm = NULL;
1492	membarrier_update_current_mm(NULL);
1493	mmgrab_lazy_tlb(mm);
1494	/* active_mm is still 'mm' */
1495	enter_lazy_tlb(mm, tsk);
1496	local_irq_enable();
1497	task_unlock(tsk);
1498
1499	mmdrop(mm);
1500}
1501EXPORT_SYMBOL_GPL(kthread_unuse_mm);
1502
1503#ifdef CONFIG_BLK_CGROUP
1504/**
1505 * kthread_associate_blkcg - associate blkcg to current kthread
1506 * @css: the cgroup info
1507 *
1508 * Current thread must be a kthread. The thread is running jobs on behalf of
1509 * other threads. In some cases, we expect the jobs to attach the cgroup info
1510 * of the original threads instead of that of the current thread. This function
1511 * stores the original thread's cgroup info in the current kthread context for
1512 * later retrieval.
1513 */
1514void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1515{
1516	struct kthread *kthread;
1517
1518	if (!(current->flags & PF_KTHREAD))
1519		return;
1520	kthread = to_kthread(current);
1521	if (!kthread)
1522		return;
1523
1524	if (kthread->blkcg_css) {
1525		css_put(kthread->blkcg_css);
1526		kthread->blkcg_css = NULL;
1527	}
1528	if (css) {
1529		css_get(css);
1530		kthread->blkcg_css = css;
1531	}
1532}
1533EXPORT_SYMBOL(kthread_associate_blkcg);
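/*
 * Illustrative sketch (editor's addition): a kthread doing I/O on behalf of
 * another task associates that task's blkcg css first so the I/O is charged
 * to the right cgroup, then drops the association.  How @css is obtained
 * from the originating task is outside the scope of this sketch.
 */
static void my_do_io_for(struct cgroup_subsys_state *css)
{
	kthread_associate_blkcg(css);	/* takes its own css reference */
	/* ... submit I/O here; the block layer consults kthread_blkcg() ... */
	kthread_associate_blkcg(NULL);	/* drop the association */
}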
1534
1535/**
1536 * kthread_blkcg - get associated blkcg css of current kthread
1537 *
1538 * Current thread must be a kthread.
1539 */
1540struct cgroup_subsys_state *kthread_blkcg(void)
1541{
1542	struct kthread *kthread;
1543
1544	if (current->flags & PF_KTHREAD) {
1545		kthread = to_kthread(current);
1546		if (kthread)
1547			return kthread->blkcg_css;
1548	}
1549	return NULL;
1550}
1551#endif