   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/*
   3 * Task-based RCU implementations.
   4 *
   5 * Copyright (C) 2020 Paul E. McKenney
   6 */
   7
   8#ifdef CONFIG_TASKS_RCU_GENERIC
   9#include "rcu_segcblist.h"
  10
  11////////////////////////////////////////////////////////////////////////
  12//
  13// Generic data structures.
  14
  15struct rcu_tasks;
  16typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  17typedef void (*pregp_func_t)(struct list_head *hop);
  18typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
  19typedef void (*postscan_func_t)(struct list_head *hop);
  20typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
  21typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  22
  23/**
  24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
  25 * @cblist: Callback list.
  26 * @lock: Lock protecting per-CPU callback list.
  27 * @rtp_jiffies: Jiffies counter value for statistics.
  28 * @rtp_n_lock_retries: Rough lock-contention statistic.
  29 * @rtp_work: Work queue for invoking callbacks.
  30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
  31 * @barrier_q_head: RCU callback for barrier operation.
  32 * @rtp_blkd_tasks: List of tasks blocked as readers.
  33 * @cpu: CPU number corresponding to this entry.
  34 * @rtpp: Pointer to the rcu_tasks structure.
  35 */
  36struct rcu_tasks_percpu {
  37	struct rcu_segcblist cblist;
  38	raw_spinlock_t __private lock;
  39	unsigned long rtp_jiffies;
  40	unsigned long rtp_n_lock_retries;
  41	struct work_struct rtp_work;
  42	struct irq_work rtp_irq_work;
  43	struct rcu_head barrier_q_head;
  44	struct list_head rtp_blkd_tasks;
  45	int cpu;
  46	struct rcu_tasks *rtpp;
  47};
  48
  49/**
  50 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
  51 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
  52 * @cbs_gbl_lock: Lock protecting callback list.
  53 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
  54 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  55 * @gp_func: This flavor's grace-period-wait function.
  56 * @gp_state: Grace period's most recent state transition (debugging).
  57 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
  58 * @init_fract: Initial backoff sleep interval.
  59 * @gp_jiffies: Time of last @gp_state transition.
  60 * @gp_start: Most recent grace-period start in jiffies.
  61 * @tasks_gp_seq: Number of grace periods completed since boot.
  62 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
  63 * @n_ipis_fails: Number of IPI-send failures.
  64 * @pregp_func: This flavor's pre-grace-period function (optional).
  65 * @pertask_func: This flavor's per-task scan function (optional).
  66 * @postscan_func: This flavor's post-task scan function (optional).
  67 * @holdouts_func: This flavor's holdout-list scan function (optional).
  68 * @postgp_func: This flavor's post-grace-period function (optional).
  69 * @call_func: This flavor's call_rcu()-equivalent function.
  70 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
  71 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
  72 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
  73 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
  74 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
  75 * @barrier_q_mutex: Serialize barrier operations.
  76 * @barrier_q_count: Number of queues being waited on.
  77 * @barrier_q_completion: Barrier wait/wakeup mechanism.
  78 * @barrier_q_seq: Sequence number for barrier operations.
  79 * @name: This flavor's textual name.
  80 * @kname: This flavor's kthread name.
  81 */
  82struct rcu_tasks {
  83	struct rcuwait cbs_wait;
  84	raw_spinlock_t cbs_gbl_lock;
  85	struct mutex tasks_gp_mutex;
  86	int gp_state;
  87	int gp_sleep;
  88	int init_fract;
  89	unsigned long gp_jiffies;
  90	unsigned long gp_start;
  91	unsigned long tasks_gp_seq;
  92	unsigned long n_ipis;
  93	unsigned long n_ipis_fails;
  94	struct task_struct *kthread_ptr;
  95	rcu_tasks_gp_func_t gp_func;
  96	pregp_func_t pregp_func;
  97	pertask_func_t pertask_func;
  98	postscan_func_t postscan_func;
  99	holdouts_func_t holdouts_func;
 100	postgp_func_t postgp_func;
 101	call_rcu_func_t call_func;
 102	struct rcu_tasks_percpu __percpu *rtpcpu;
 103	int percpu_enqueue_shift;
 104	int percpu_enqueue_lim;
 105	int percpu_dequeue_lim;
 106	unsigned long percpu_dequeue_gpseq;
 107	struct mutex barrier_q_mutex;
 108	atomic_t barrier_q_count;
 109	struct completion barrier_q_completion;
 110	unsigned long barrier_q_seq;
 111	char *name;
 112	char *kname;
 113};
 114
 115static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
 116
 117#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
 118static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
 119	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
 120	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
 121};											\
 122static struct rcu_tasks rt_name =							\
 123{											\
 124	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
 125	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
 126	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
 127	.gp_func = gp,									\
 128	.call_func = call,								\
 129	.rtpcpu = &rt_name ## __percpu,							\
 130	.name = n,									\
 131	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
 132	.percpu_enqueue_lim = 1,							\
 133	.percpu_dequeue_lim = 1,							\
 134	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
 135	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
 136	.kname = #rt_name,								\
 137}
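/*
 * For illustration: each Tasks-RCU flavor in this file instantiates the
 * boilerplate above exactly once.  For example, the later
 * DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks")
 * defines both the global "rcu_tasks" structure and its companion per-CPU
 * "rcu_tasks__percpu" storage, linked together through ->rtpcpu.
 */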
 138
 139/* Track exiting tasks in order to allow them to be waited for. */
 140DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 141
 142/* Avoid IPIing CPUs early in the grace period. */
 143#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
 144static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
 145module_param(rcu_task_ipi_delay, int, 0644);
 146
 147/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
 148#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
 149#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
 150static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 151module_param(rcu_task_stall_timeout, int, 0644);
 152#define RCU_TASK_STALL_INFO (HZ * 10)
 153static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
 154module_param(rcu_task_stall_info, int, 0644);
 155static int rcu_task_stall_info_mult __read_mostly = 3;
 156module_param(rcu_task_stall_info_mult, int, 0444);
 157
 158static int rcu_task_enqueue_lim __read_mostly = -1;
 159module_param(rcu_task_enqueue_lim, int, 0444);
 160
 161static bool rcu_task_cb_adjust;
 162static int rcu_task_contend_lim __read_mostly = 100;
 163module_param(rcu_task_contend_lim, int, 0444);
 164static int rcu_task_collapse_lim __read_mostly = 10;
 165module_param(rcu_task_collapse_lim, int, 0444);
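/*
 * For illustration: this file is included from kernel/rcu/update.c, so the
 * module_param() definitions above are normally visible under the
 * "rcupdate." prefix.  For example, the stall timeouts could be tuned from
 * the kernel command line (the values below are arbitrary):
 *
 *	rcupdate.rcu_task_stall_timeout=3000 rcupdate.rcu_task_stall_info=500
 */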
 166
 167/* RCU tasks grace-period state for debugging. */
 168#define RTGS_INIT		 0
 169#define RTGS_WAIT_WAIT_CBS	 1
 170#define RTGS_WAIT_GP		 2
 171#define RTGS_PRE_WAIT_GP	 3
 172#define RTGS_SCAN_TASKLIST	 4
 173#define RTGS_POST_SCAN_TASKLIST	 5
 174#define RTGS_WAIT_SCAN_HOLDOUTS	 6
 175#define RTGS_SCAN_HOLDOUTS	 7
 176#define RTGS_POST_GP		 8
 177#define RTGS_WAIT_READERS	 9
 178#define RTGS_INVOKE_CBS		10
 179#define RTGS_WAIT_CBS		11
 180#ifndef CONFIG_TINY_RCU
 181static const char * const rcu_tasks_gp_state_names[] = {
 182	"RTGS_INIT",
 183	"RTGS_WAIT_WAIT_CBS",
 184	"RTGS_WAIT_GP",
 185	"RTGS_PRE_WAIT_GP",
 186	"RTGS_SCAN_TASKLIST",
 187	"RTGS_POST_SCAN_TASKLIST",
 188	"RTGS_WAIT_SCAN_HOLDOUTS",
 189	"RTGS_SCAN_HOLDOUTS",
 190	"RTGS_POST_GP",
 191	"RTGS_WAIT_READERS",
 192	"RTGS_INVOKE_CBS",
 193	"RTGS_WAIT_CBS",
 194};
 195#endif /* #ifndef CONFIG_TINY_RCU */
 196
 197////////////////////////////////////////////////////////////////////////
 198//
 199// Generic code.
 200
 201static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
 202
 203/* Record grace-period phase and time. */
 204static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
 205{
 206	rtp->gp_state = newstate;
 207	rtp->gp_jiffies = jiffies;
 208}
 209
 210#ifndef CONFIG_TINY_RCU
 211/* Return state name. */
 212static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
 213{
 214	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
 215	int j = READ_ONCE(i); // Prevent the compiler from reading twice
 216
 217	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
 218		return "???";
 219	return rcu_tasks_gp_state_names[j];
 220}
 221#endif /* #ifndef CONFIG_TINY_RCU */
 222
 223// Initialize per-CPU callback lists for the specified flavor of
 224// Tasks RCU.
 225static void cblist_init_generic(struct rcu_tasks *rtp)
 226{
 227	int cpu;
 228	unsigned long flags;
 229	int lim;
 230	int shift;
 231
 232	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 233	if (rcu_task_enqueue_lim < 0) {
 234		rcu_task_enqueue_lim = 1;
 235		rcu_task_cb_adjust = true;
 236		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
 237	} else if (rcu_task_enqueue_lim == 0) {
 238		rcu_task_enqueue_lim = 1;
 239	}
 240	lim = rcu_task_enqueue_lim;
 241
 242	if (lim > nr_cpu_ids)
 243		lim = nr_cpu_ids;
 244	shift = ilog2(nr_cpu_ids / lim);
 245	if (((nr_cpu_ids - 1) >> shift) >= lim)
 246		shift++;
 247	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
 248	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
 249	smp_store_release(&rtp->percpu_enqueue_lim, lim);
 250	for_each_possible_cpu(cpu) {
 251		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 252
 253		WARN_ON_ONCE(!rtpcp);
 254		if (cpu)
 255			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
 256		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 257		if (rcu_segcblist_empty(&rtpcp->cblist))
 258			rcu_segcblist_init(&rtpcp->cblist);
 259		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
 260		rtpcp->cpu = cpu;
 261		rtpcp->rtpp = rtp;
 262		if (!rtpcp->rtp_blkd_tasks.next)
 263			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
 264		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 265	}
 266	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 267	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
 268}
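/*
 * Worked example for the shift/lim computation above (illustrative values):
 * with nr_cpu_ids = 16 and rcu_task_enqueue_lim = 4, ilog2(16 / 4) = 2 and
 * (15 >> 2) = 3 is less than 4, so shift remains 2.  Enqueuers then map
 * CPU IDs 0-15 onto callback queues 0-3 via smp_processor_id() >> 2,
 * which matches the four-queue limit.
 */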
 269
 270// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
 271static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
 272{
 273	struct rcu_tasks *rtp;
 274	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
 275
 276	rtp = rtpcp->rtpp;
 277	rcuwait_wake_up(&rtp->cbs_wait);
 278}
 279
 280// Enqueue a callback for the specified flavor of Tasks RCU.
 281static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 282				   struct rcu_tasks *rtp)
 283{
 284	int chosen_cpu;
 285	unsigned long flags;
 286	int ideal_cpu;
 287	unsigned long j;
 288	bool needadjust = false;
 289	bool needwake;
 290	struct rcu_tasks_percpu *rtpcp;
 291
 292	rhp->next = NULL;
 293	rhp->func = func;
 294	local_irq_save(flags);
 295	rcu_read_lock();
 296	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
 297	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
 298	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
 299	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
 300		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 301		j = jiffies;
 302		if (rtpcp->rtp_jiffies != j) {
 303			rtpcp->rtp_jiffies = j;
 304			rtpcp->rtp_n_lock_retries = 0;
 305		}
 306		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
 307		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
 308			needadjust = true;  // Defer adjustment to avoid deadlock.
 309	}
 310	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
 311		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 312		cblist_init_generic(rtp);
 313		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 314	}
 315	needwake = rcu_segcblist_empty(&rtpcp->cblist);
 316	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
 317	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 318	if (unlikely(needadjust)) {
 319		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 320		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
 321			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
 322			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
 323			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
 324			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
 325		}
 326		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 327	}
 328	rcu_read_unlock();
 329	/* We can't create the thread unless interrupts are enabled. */
 330	if (needwake && READ_ONCE(rtp->kthread_ptr))
 331		irq_work_queue(&rtpcp->rtp_irq_work);
 332}
 333
 334// RCU callback function for rcu_barrier_tasks_generic().
 335static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
 336{
 337	struct rcu_tasks *rtp;
 338	struct rcu_tasks_percpu *rtpcp;
 339
 340	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
 341	rtp = rtpcp->rtpp;
 342	if (atomic_dec_and_test(&rtp->barrier_q_count))
 343		complete(&rtp->barrier_q_completion);
 344}
 345
 346// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
 347// Operates in a manner similar to rcu_barrier().
 348static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
 349{
 350	int cpu;
 351	unsigned long flags;
 352	struct rcu_tasks_percpu *rtpcp;
 353	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
 354
 355	mutex_lock(&rtp->barrier_q_mutex);
 356	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
 357		smp_mb();
 358		mutex_unlock(&rtp->barrier_q_mutex);
 359		return;
 360	}
 361	rcu_seq_start(&rtp->barrier_q_seq);
 362	init_completion(&rtp->barrier_q_completion);
 363	atomic_set(&rtp->barrier_q_count, 2);
 364	for_each_possible_cpu(cpu) {
 365		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
 366			break;
 367		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 368		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
 369		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 370		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
 371			atomic_inc(&rtp->barrier_q_count);
 372		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 373	}
 374	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
 375		complete(&rtp->barrier_q_completion);
 376	wait_for_completion(&rtp->barrier_q_completion);
 377	rcu_seq_end(&rtp->barrier_q_seq);
 378	mutex_unlock(&rtp->barrier_q_mutex);
 379}
 380
 381// Advance callbacks and indicate whether either a grace period or
 382// callback invocation is needed.
 383static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 384{
 385	int cpu;
 386	unsigned long flags;
 387	long n;
 388	long ncbs = 0;
 389	long ncbsnz = 0;
 390	int needgpcb = 0;
 391
 392	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
 393		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 394
 395		/* Advance and accelerate any new callbacks. */
 396		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
 397			continue;
 398		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 399		// Should we shrink down to a single callback queue?
 400		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
 401		if (n) {
 402			ncbs += n;
 403			if (cpu > 0)
 404				ncbsnz += n;
 405		}
 406		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 407		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 408		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
 409			needgpcb |= 0x3;
 410		if (!rcu_segcblist_empty(&rtpcp->cblist))
 411			needgpcb |= 0x1;
 412		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 413	}
 414
 415	// Shrink down to a single callback queue if appropriate.
 416	// This is done in two stages: (1) If there are no more than
 417	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
 418	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
 419	// if there has not been an increase in callbacks, limit dequeuing
 420	// to CPU 0.  Note the matching RCU read-side critical section in
 421	// call_rcu_tasks_generic().
 422	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 423		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 424		if (rtp->percpu_enqueue_lim > 1) {
 425			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
 426			smp_store_release(&rtp->percpu_enqueue_lim, 1);
 427			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 428			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
 429		}
 430		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 431	}
 432	if (rcu_task_cb_adjust && !ncbsnz &&
 433	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
 434		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 435		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
 436			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
 437			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
 438		}
 439		for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
 440			struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 441
 442			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
 443		}
 444		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 445	}
 446
 447	return needgpcb;
 448}
 449
 450// Advance callbacks and invoke any that are ready.
 451static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
 452{
 453	int cpu;
 454	int cpunext;
 455	unsigned long flags;
 456	int len;
 457	struct rcu_head *rhp;
 458	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 459	struct rcu_tasks_percpu *rtpcp_next;
 460
 461	cpu = rtpcp->cpu;
 462	cpunext = cpu * 2 + 1;
 463	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 464		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 465		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 466		cpunext++;
 467		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 468			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 469			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 470		}
 471	}
 472
 473	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
 474		return;
 475	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 476	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 477	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
 478	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 479	len = rcl.len;
 480	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
 481		local_bh_disable();
 482		rhp->func(rhp);
 483		local_bh_enable();
 484		cond_resched();
 485	}
 486	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 487	rcu_segcblist_add_len(&rtpcp->cblist, -len);
 488	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 489	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 490}
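/*
 * Illustrative note on the fan-out above: callback invocation spreads over
 * the CPUs as a binary tree rooted at CPU 0, because the invocation for
 * CPU "cpu" queues work on CPUs 2*cpu+1 and 2*cpu+2 whenever those fall
 * below ->percpu_dequeue_lim.  For example, with a dequeue limit of 8,
 * CPU 0 kicks CPUs 1 and 2, CPU 1 kicks CPUs 3 and 4, CPU 2 kicks CPUs 5
 * and 6, and CPU 3 kicks CPU 7.
 */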
 491
 492// Workqueue flood to advance callbacks and invoke any that are ready.
 493static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
 494{
 495	struct rcu_tasks *rtp;
 496	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
 497
 498	rtp = rtpcp->rtpp;
 499	rcu_tasks_invoke_cbs(rtp, rtpcp);
 500}
 501
 502// Wait for one grace period.
 503static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
 504{
 505	int needgpcb;
 506
 507	mutex_lock(&rtp->tasks_gp_mutex);
 508
 509	// If there were none, wait a bit and start over.
 510	if (unlikely(midboot)) {
 511		needgpcb = 0x2;
 512	} else {
 513		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 514		rcuwait_wait_event(&rtp->cbs_wait,
 515				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
 516				   TASK_IDLE);
 517	}
 518
 519	if (needgpcb & 0x2) {
 520		// Wait for one grace period.
 521		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
 522		rtp->gp_start = jiffies;
 523		rcu_seq_start(&rtp->tasks_gp_seq);
 524		rtp->gp_func(rtp);
 525		rcu_seq_end(&rtp->tasks_gp_seq);
 526	}
 527
 528	// Invoke callbacks.
 529	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
 530	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
 531	mutex_unlock(&rtp->tasks_gp_mutex);
 532}
 533
 534// RCU-tasks kthread that detects grace periods and invokes callbacks.
 535static int __noreturn rcu_tasks_kthread(void *arg)
 536{
 537	struct rcu_tasks *rtp = arg;
 538
 539	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
 540	housekeeping_affine(current, HK_TYPE_RCU);
 541	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
 542
 543	/*
 544	 * Each pass through the following loop makes one check for
 545	 * newly arrived callbacks, and, if there are some, waits for
 546	 * one RCU-tasks grace period and then invokes the callbacks.
 547	 * This loop is terminated by the system going down.  ;-)
 548	 */
 549	for (;;) {
 550		// Wait for one grace period and invoke any callbacks
 551		// that are ready.
 552		rcu_tasks_one_gp(rtp, false);
 553
 554		// Paranoid sleep to keep this from entering a tight loop.
 555		schedule_timeout_idle(rtp->gp_sleep);
 556	}
 557}
 558
 559// Wait for a grace period for the specified flavor of Tasks RCU.
 560static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 561{
 562	/* Complain if the scheduler has not started.  */
 563	WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
 564			 "synchronize_rcu_tasks called too soon");
 565
 566	// If the grace-period kthread is running, use it.
 567	if (READ_ONCE(rtp->kthread_ptr)) {
 568		wait_rcu_gp(rtp->call_func);
 569		return;
 570	}
 571	rcu_tasks_one_gp(rtp, true);
 572}
 573
 574/* Spawn RCU-tasks grace-period kthread. */
 575static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 576{
 577	struct task_struct *t;
 578
 579	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
 580	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
 581		return;
 582	smp_mb(); /* Ensure others see full kthread. */
 583}
 584
 585#ifndef CONFIG_TINY_RCU
 586
 587/*
 588 * Print any non-default Tasks RCU settings.
 589 */
 590static void __init rcu_tasks_bootup_oddness(void)
 591{
 592#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 593	int rtsimc;
 594
 595	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
 596		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
 597	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
 598	if (rtsimc != rcu_task_stall_info_mult) {
 599		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
 600		rcu_task_stall_info_mult = rtsimc;
 601	}
  602#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 603#ifdef CONFIG_TASKS_RCU
 604	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
 605#endif /* #ifdef CONFIG_TASKS_RCU */
 606#ifdef CONFIG_TASKS_RUDE_RCU
 607	pr_info("\tRude variant of Tasks RCU enabled.\n");
 608#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
 609#ifdef CONFIG_TASKS_TRACE_RCU
 610	pr_info("\tTracing variant of Tasks RCU enabled.\n");
 611#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 612}
 613
 614#endif /* #ifndef CONFIG_TINY_RCU */
 615
 616#ifndef CONFIG_TINY_RCU
 617/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
 618static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
 619{
 620	int cpu;
 621	bool havecbs = false;
 622
 623	for_each_possible_cpu(cpu) {
 624		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 625
 626		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
 627			havecbs = true;
 628			break;
 629		}
 630	}
 631	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
 632		rtp->kname,
 633		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
 634		jiffies - data_race(rtp->gp_jiffies),
 635		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
 636		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
 637		".k"[!!data_race(rtp->kthread_ptr)],
 638		".C"[havecbs],
 639		s);
 640}
 641#endif // #ifndef CONFIG_TINY_RCU
 642
 643static void exit_tasks_rcu_finish_trace(struct task_struct *t);
 644
 645#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 646
 647////////////////////////////////////////////////////////////////////////
 648//
 649// Shared code between task-list-scanning variants of Tasks RCU.
 650
 651/* Wait for one RCU-tasks grace period. */
 652static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 653{
 654	struct task_struct *g;
 655	int fract;
 656	LIST_HEAD(holdouts);
 657	unsigned long j;
 658	unsigned long lastinfo;
 659	unsigned long lastreport;
 660	bool reported = false;
 661	int rtsi;
 662	struct task_struct *t;
 663
 664	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
 665	rtp->pregp_func(&holdouts);
 666
 667	/*
 668	 * There were callbacks, so we need to wait for an RCU-tasks
 669	 * grace period.  Start off by scanning the task list for tasks
 670	 * that are not already voluntarily blocked.  Mark these tasks
 671	 * and make a list of them in holdouts.
 672	 */
 673	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
 674	if (rtp->pertask_func) {
 675		rcu_read_lock();
 676		for_each_process_thread(g, t)
 677			rtp->pertask_func(t, &holdouts);
 678		rcu_read_unlock();
 679	}
 680
 681	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
 682	rtp->postscan_func(&holdouts);
 683
 684	/*
 685	 * Each pass through the following loop scans the list of holdout
 686	 * tasks, removing any that are no longer holdouts.  When the list
 687	 * is empty, we are done.
 688	 */
 689	lastreport = jiffies;
 690	lastinfo = lastreport;
 691	rtsi = READ_ONCE(rcu_task_stall_info);
 692
 693	// Start off with initial wait and slowly back off to 1 HZ wait.
 694	fract = rtp->init_fract;
 695
 696	while (!list_empty(&holdouts)) {
 697		ktime_t exp;
 698		bool firstreport;
 699		bool needreport;
 700		int rtst;
 701
 702		// Slowly back off waiting for holdouts
 703		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
 704		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 705			schedule_timeout_idle(fract);
 706		} else {
 707			exp = jiffies_to_nsecs(fract);
 708			__set_current_state(TASK_IDLE);
 709			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
 710		}
 711
 712		if (fract < HZ)
 713			fract++;
 714
 715		rtst = READ_ONCE(rcu_task_stall_timeout);
 716		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
 717		if (needreport) {
 718			lastreport = jiffies;
 719			reported = true;
 720		}
 721		firstreport = true;
 722		WARN_ON(signal_pending(current));
 723		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
 724		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 725
 726		// Print pre-stall informational messages if needed.
 727		j = jiffies;
 728		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
 729			lastinfo = j;
 730			rtsi = rtsi * rcu_task_stall_info_mult;
 731			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
 732				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
 733		}
 734	}
 735
 736	set_tasks_gp_state(rtp, RTGS_POST_GP);
 737	rtp->postgp_func(rtp);
 738}
 739
 740#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 741
 742#ifdef CONFIG_TASKS_RCU
 743
 744////////////////////////////////////////////////////////////////////////
 745//
 746// Simple variant of RCU whose quiescent states are voluntary context
 747// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
 748// As such, grace periods can take one good long time.  There are no
 749// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 750// because this implementation is intended to get the system into a safe
 751// state for some of the manipulations involved in tracing and the like.
 752// Finally, this implementation does not support high call_rcu_tasks()
 753// rates from multiple CPUs.  If this is required, per-CPU callback lists
 754// will be needed.
 755//
 756// The implementation uses rcu_tasks_wait_gp(), which relies on function
 757// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
 758// function sets these function pointers up so that rcu_tasks_wait_gp()
 759// invokes these functions in this order:
 760//
 761// rcu_tasks_pregp_step():
 762//	Invokes synchronize_rcu() in order to wait for all in-flight
 763//	t->on_rq and t->nvcsw transitions to complete.	This works because
 764//	all such transitions are carried out with interrupts disabled.
 765// rcu_tasks_pertask(), invoked on every non-idle task:
 766//	For every runnable non-idle task other than the current one, use
 767//	get_task_struct() to pin down that task, snapshot that task's
 768//	number of voluntary context switches, and add that task to the
 769//	holdout list.
 770// rcu_tasks_postscan():
 771//	Invoke synchronize_srcu() to ensure that all tasks that were
 772//	in the process of exiting (and which thus might not know to
 773//	synchronize with this RCU Tasks grace period) have completed
 774//	exiting.
 775// check_all_holdout_tasks(), repeatedly until holdout list is empty:
 776//	Scans the holdout list, attempting to identify a quiescent state
 777//	for each task on the list.  If there is a quiescent state, the
 778//	corresponding task is removed from the holdout list.
 779// rcu_tasks_postgp():
 780//	Invokes synchronize_rcu() in order to ensure that all prior
 781//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
 782//	to have happened before the end of this RCU Tasks grace period.
 783//	Again, this works because all such transitions are carried out
 784//	with interrupts disabled.
 785//
 786// For each exiting task, the exit_tasks_rcu_start() and
 787// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
 788// read-side critical sections waited for by rcu_tasks_postscan().
 789//
  790// Pre-grace-period update-side code is ordered before the grace period
 791// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
 792// is ordered before the grace period via synchronize_rcu() call in
 793// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
 794// disabling.
 795
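/*
 * Condensed paraphrase of the sequence described above, as driven by
 * rcu_tasks_wait_gp() with the function pointers set up for this flavor
 * (sketch only, not additional code):
 *
 *	rcu_tasks_pregp_step();				// synchronize_rcu()
 *	for_each_process_thread(g, t)
 *		rcu_tasks_pertask(t, &holdouts);	// build holdout list
 *	rcu_tasks_postscan(&holdouts);			// synchronize_srcu()
 *	while (!list_empty(&holdouts))
 *		check_all_holdout_tasks(&holdouts, ...);// drop quiesced tasks
 *	rcu_tasks_postgp(rtp);				// synchronize_rcu()
 */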
 796/* Pre-grace-period preparation. */
 797static void rcu_tasks_pregp_step(struct list_head *hop)
 798{
 799	/*
 800	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
 801	 * to complete.  Invoking synchronize_rcu() suffices because all
 802	 * these transitions occur with interrupts disabled.  Without this
 803	 * synchronize_rcu(), a read-side critical section that started
 804	 * before the grace period might be incorrectly seen as having
 805	 * started after the grace period.
 806	 *
 807	 * This synchronize_rcu() also dispenses with the need for a
 808	 * memory barrier on the first store to t->rcu_tasks_holdout,
 809	 * as it forces the store to happen after the beginning of the
 810	 * grace period.
 811	 */
 812	synchronize_rcu();
 813}
 814
 815/* Per-task initial processing. */
 816static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 817{
 818	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
 819		get_task_struct(t);
 820		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 821		WRITE_ONCE(t->rcu_tasks_holdout, true);
 822		list_add(&t->rcu_tasks_holdout_list, hop);
 823	}
 824}
 825
 826/* Processing between scanning taskslist and draining the holdout list. */
 827static void rcu_tasks_postscan(struct list_head *hop)
 828{
 829	/*
 830	 * Wait for tasks that are in the process of exiting.  This
 831	 * does only part of the job, ensuring that all tasks that were
 832	 * previously exiting reach the point where they have disabled
 833	 * preemption, allowing the later synchronize_rcu() to finish
 834	 * the job.
 835	 */
 836	synchronize_srcu(&tasks_rcu_exit_srcu);
 837}
 838
 839/* See if tasks are still holding out, complain if so. */
 840static void check_holdout_task(struct task_struct *t,
 841			       bool needreport, bool *firstreport)
 842{
 843	int cpu;
 844
 845	if (!READ_ONCE(t->rcu_tasks_holdout) ||
 846	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
 847	    !READ_ONCE(t->on_rq) ||
 848	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 849	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 850		WRITE_ONCE(t->rcu_tasks_holdout, false);
 851		list_del_init(&t->rcu_tasks_holdout_list);
 852		put_task_struct(t);
 853		return;
 854	}
 855	rcu_request_urgent_qs_task(t);
 856	if (!needreport)
 857		return;
 858	if (*firstreport) {
 859		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
 860		*firstreport = false;
 861	}
 862	cpu = task_cpu(t);
 863	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
 864		 t, ".I"[is_idle_task(t)],
 865		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
 866		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
 867		 t->rcu_tasks_idle_cpu, cpu);
 868	sched_show_task(t);
 869}
 870
 871/* Scan the holdout lists for tasks no longer holding out. */
 872static void check_all_holdout_tasks(struct list_head *hop,
 873				    bool needreport, bool *firstreport)
 874{
 875	struct task_struct *t, *t1;
 876
 877	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
 878		check_holdout_task(t, needreport, firstreport);
 879		cond_resched();
 880	}
 881}
 882
 883/* Finish off the Tasks-RCU grace period. */
 884static void rcu_tasks_postgp(struct rcu_tasks *rtp)
 885{
 886	/*
 887	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
 888	 * memory barriers prior to them in the schedule() path, memory
 889	 * reordering on other CPUs could cause their RCU-tasks read-side
 890	 * critical sections to extend past the end of the grace period.
 891	 * However, because these ->nvcsw updates are carried out with
 892	 * interrupts disabled, we can use synchronize_rcu() to force the
 893	 * needed ordering on all such CPUs.
 894	 *
 895	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
 896	 * accesses to be within the grace period, avoiding the need for
 897	 * memory barriers for ->rcu_tasks_holdout accesses.
 898	 *
 899	 * In addition, this synchronize_rcu() waits for exiting tasks
 900	 * to complete their final preempt_disable() region of execution,
 901	 * cleaning up after the synchronize_srcu() above.
 902	 */
 903	synchronize_rcu();
 904}
 905
 906void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 907DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 908
 909/**
  910 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 911 * @rhp: structure to be used for queueing the RCU updates.
 912 * @func: actual callback function to be invoked after the grace period
 913 *
 914 * The callback function will be invoked some time after a full grace
 915 * period elapses, in other words after all currently executing RCU
 916 * read-side critical sections have completed. call_rcu_tasks() assumes
 917 * that the read-side critical sections end at a voluntary context
 918 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 919 * or transition to usermode execution.  As such, there are no read-side
 920 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 921 * this primitive is intended to determine that all tasks have passed
 922 * through a safe state, not so much for data-structure synchronization.
 923 *
 924 * See the description of call_rcu() for more detailed information on
 925 * memory ordering guarantees.
 926 */
 927void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 928{
 929	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
 930}
 931EXPORT_SYMBOL_GPL(call_rcu_tasks);
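/*
 * Usage sketch (hypothetical caller, not taken from this file): free a
 * dynamically allocated trampoline only after every task has passed
 * through a voluntary context switch, so that no task can still be
 * executing within it.  The "struct my_tramp" type and my_tramp_free()
 * callback are assumptions made for the example.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void my_tramp_free(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *mt = container_of(rhp, struct my_tramp, rh);
 *
 *		vfree(mt->insns);
 *		kfree(mt);
 *	}
 *
 *	// After unhooking mt->insns from all call sites:
 *	call_rcu_tasks(&mt->rh, my_tramp_free);
 */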
 932
 933/**
 934 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 935 *
 936 * Control will return to the caller some time after a full rcu-tasks
 937 * grace period has elapsed, in other words after all currently
 938 * executing rcu-tasks read-side critical sections have elapsed.  These
 939 * read-side critical sections are delimited by calls to schedule(),
 940 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 941 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 942 *
 943 * This is a very specialized primitive, intended only for a few uses in
 944 * tracing and other situations requiring manipulation of function
 945 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 946 * is not (yet) intended for heavy use from multiple CPUs.
 947 *
 948 * See the description of synchronize_rcu() for more detailed information
 949 * on memory ordering guarantees.
 950 */
 951void synchronize_rcu_tasks(void)
 952{
 953	synchronize_rcu_tasks_generic(&rcu_tasks);
 954}
 955EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
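/*
 * Usage sketch (hypothetical, the synchronous counterpart of the
 * call_rcu_tasks() example above):
 *
 *	unhook_my_tramp(mt);		// hypothetical: detach all callers
 *	synchronize_rcu_tasks();	// wait for tasks to leave the trampoline
 *	vfree(mt->insns);
 *	kfree(mt);
 */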
 956
 957/**
 958 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 959 *
 960 * Although the current implementation is guaranteed to wait, it is not
 961 * obligated to, for example, if there are no pending callbacks.
 962 */
 963void rcu_barrier_tasks(void)
 964{
 965	rcu_barrier_tasks_generic(&rcu_tasks);
 966}
 967EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
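/*
 * Usage sketch (hypothetical): code that queued callbacks with
 * call_rcu_tasks() must flush them before the callback functions or the
 * memory they touch can go away, for example in a module exit path:
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		unregister_my_hooks();	// hypothetical: stop queueing callbacks
 *		rcu_barrier_tasks();	// wait for already-queued callbacks
 *	}
 */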
 968
 969static int __init rcu_spawn_tasks_kthread(void)
 970{
 971	cblist_init_generic(&rcu_tasks);
 972	rcu_tasks.gp_sleep = HZ / 10;
 973	rcu_tasks.init_fract = HZ / 10;
 974	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
 975	rcu_tasks.pertask_func = rcu_tasks_pertask;
 976	rcu_tasks.postscan_func = rcu_tasks_postscan;
 977	rcu_tasks.holdouts_func = check_all_holdout_tasks;
 978	rcu_tasks.postgp_func = rcu_tasks_postgp;
 979	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
 980	return 0;
 981}
 982
 983#if !defined(CONFIG_TINY_RCU)
 984void show_rcu_tasks_classic_gp_kthread(void)
 985{
 986	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
 987}
 988EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
 989#endif // !defined(CONFIG_TINY_RCU)
 990
 991/* Do the srcu_read_lock() for the above synchronize_srcu().  */
 992void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
 993{
 994	preempt_disable();
 995	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
 996	preempt_enable();
 997}
 998
 999/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
1000void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
1001{
1002	struct task_struct *t = current;
1003
1004	preempt_disable();
1005	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1006	preempt_enable();
1007	exit_tasks_rcu_finish_trace(t);
1008}
1009
1010#else /* #ifdef CONFIG_TASKS_RCU */
1011void exit_tasks_rcu_start(void) { }
1012void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1013#endif /* #else #ifdef CONFIG_TASKS_RCU */
1014
1015#ifdef CONFIG_TASKS_RUDE_RCU
1016
1017////////////////////////////////////////////////////////////////////////
1018//
1019// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1020// passing an empty function to schedule_on_each_cpu().  This approach
1021// provides an asynchronous call_rcu_tasks_rude() API and batching of
1022// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1023// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1024// and induces otherwise unnecessary context switches on all online CPUs,
1025// whether idle or not.
1026//
1027// Callback handling is provided by the rcu_tasks_kthread() function.
1028//
1029// Ordering is provided by the scheduler's context-switch code.
1030
1031// Empty function to allow workqueues to force a context switch.
1032static void rcu_tasks_be_rude(struct work_struct *work)
1033{
1034}
1035
1036// Wait for one rude RCU-tasks grace period.
1037static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1038{
1039	if (num_online_cpus() <= 1)
1040		return;	// Fastpath for only one CPU.
1041
1042	rtp->n_ipis += cpumask_weight(cpu_online_mask);
1043	schedule_on_each_cpu(rcu_tasks_be_rude);
1044}
1045
1046void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1047DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1048		 "RCU Tasks Rude");
1049
1050/**
 1051 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1052 * @rhp: structure to be used for queueing the RCU updates.
1053 * @func: actual callback function to be invoked after the grace period
1054 *
1055 * The callback function will be invoked some time after a full grace
1056 * period elapses, in other words after all currently executing RCU
1057 * read-side critical sections have completed. call_rcu_tasks_rude()
1058 * assumes that the read-side critical sections end at context switch,
1059 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1060 * usermode execution is schedulable). As such, there are no read-side
1061 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1062 * this primitive is intended to determine that all tasks have passed
1063 * through a safe state, not so much for data-structure synchronization.
1064 *
1065 * See the description of call_rcu() for more detailed information on
1066 * memory ordering guarantees.
1067 */
1068void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1069{
1070	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1071}
1072EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1073
1074/**
1075 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1076 *
1077 * Control will return to the caller some time after a rude rcu-tasks
1078 * grace period has elapsed, in other words after all currently
1079 * executing rcu-tasks read-side critical sections have elapsed.  These
1080 * read-side critical sections are delimited by calls to schedule(),
1081 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1082 * context), and (in theory, anyway) cond_resched().
1083 *
1084 * This is a very specialized primitive, intended only for a few uses in
1085 * tracing and other situations requiring manipulation of function preambles
1086 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1087 * (yet) intended for heavy use from multiple CPUs.
1088 *
1089 * See the description of synchronize_rcu() for more detailed information
1090 * on memory ordering guarantees.
1091 */
1092void synchronize_rcu_tasks_rude(void)
1093{
1094	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1095}
1096EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
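/*
 * Usage sketch (hypothetical): the rude variant induces a context switch
 * on every online CPU, so it can cover code that may still be running
 * with preemption disabled:
 *
 *	remove_my_preempt_off_hook();	// hypothetical: detach the hook
 *	synchronize_rcu_tasks_rude();	// every CPU has context-switched since
 *	free_my_hook_text();		// hypothetical: now safe to free
 */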
1097
1098/**
1099 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1100 *
1101 * Although the current implementation is guaranteed to wait, it is not
1102 * obligated to, for example, if there are no pending callbacks.
1103 */
1104void rcu_barrier_tasks_rude(void)
1105{
1106	rcu_barrier_tasks_generic(&rcu_tasks_rude);
1107}
1108EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1109
1110static int __init rcu_spawn_tasks_rude_kthread(void)
1111{
1112	cblist_init_generic(&rcu_tasks_rude);
1113	rcu_tasks_rude.gp_sleep = HZ / 10;
1114	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1115	return 0;
1116}
1117
1118#if !defined(CONFIG_TINY_RCU)
1119void show_rcu_tasks_rude_gp_kthread(void)
1120{
1121	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1122}
1123EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1124#endif // !defined(CONFIG_TINY_RCU)
1125#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1126
1127////////////////////////////////////////////////////////////////////////
1128//
1129// Tracing variant of Tasks RCU.  This variant is designed to be used
1130// to protect tracing hooks, including those of BPF.  This variant
1131// therefore:
1132//
1133// 1.	Has explicit read-side markers to allow finite grace periods
1134//	in the face of in-kernel loops for PREEMPT=n builds.
1135//
1136// 2.	Protects code in the idle loop, exception entry/exit, and
1137//	CPU-hotplug code paths, similar to the capabilities of SRCU.
1138//
1139// 3.	Avoids expensive read-side instructions, having overhead similar
1140//	to that of Preemptible RCU.
1141//
1142// There are of course downsides.  For example, the grace-period code
1143// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1144// in nohz_full userspace.  If needed, these downsides can be at least
1145// partially remedied.
1146//
1147// Perhaps most important, this variant of RCU does not affect the vanilla
1148// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1149// readers can operate from idle, offline, and exception entry/exit in no
1150// way allows rcu_preempt and rcu_sched readers to also do so.
1151//
1152// The implementation uses rcu_tasks_wait_gp(), which relies on function
1153// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1154// function sets these function pointers up so that rcu_tasks_wait_gp()
1155// invokes these functions in this order:
1156//
1157// rcu_tasks_trace_pregp_step():
1158//	Disables CPU hotplug, adds all currently executing tasks to the
1159//	holdout list, then checks the state of all tasks that blocked
1160//	or were preempted within their current RCU Tasks Trace read-side
1161//	critical section, adding them to the holdout list if appropriate.
1162//	Finally, this function re-enables CPU hotplug.
1163// The ->pertask_func() pointer is NULL, so there is no per-task processing.
1164// rcu_tasks_trace_postscan():
1165//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
1166//	to finish exiting.
1167// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1168//	Scans the holdout list, attempting to identify a quiescent state
1169//	for each task on the list.  If there is a quiescent state, the
1170//	corresponding task is removed from the holdout list.  Once this
1171//	list is empty, the grace period has completed.
1172// rcu_tasks_trace_postgp():
1173//	Provides the needed full memory barrier and does debug checks.
1174//
1175// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1176//
1177// Pre-grace-period update-side code is ordered before the grace period
1178// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
1179// read-side code is ordered before the grace period by atomic operations
1180// on .b.need_qs flag of each task involved in this process, or by scheduler
1181// context-switch ordering (for locked-down non-running readers).
1182
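/*
 * Read-side usage sketch (hypothetical reader/updater pair; "my_hook" and
 * its structure are assumptions made for the example):
 *
 *	// Reader, e.g. a BPF-style tracing hook:
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(my_hook, rcu_read_lock_trace_held());
 *	if (p)
 *		p->func(p->arg);
 *	rcu_read_unlock_trace();
 *
 *	// Updater removing the hook:
 *	old = rcu_replace_pointer(my_hook, NULL, true);
 *	synchronize_rcu_tasks_trace();	// or use call_rcu_tasks_trace()
 *	kfree(old);
 */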
1183// The lockdep state must be outside of #ifdef to be useful.
1184#ifdef CONFIG_DEBUG_LOCK_ALLOC
1185static struct lock_class_key rcu_lock_trace_key;
1186struct lockdep_map rcu_trace_lock_map =
1187	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1188EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1189#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1190
1191#ifdef CONFIG_TASKS_TRACE_RCU
1192
1193// Record outstanding IPIs to each CPU.  No point in sending two...
1194static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1195
1196// The number of detections of task quiescent state relying on
1197// heavyweight readers executing explicit memory barriers.
1198static unsigned long n_heavy_reader_attempts;
1199static unsigned long n_heavy_reader_updates;
1200static unsigned long n_heavy_reader_ofl_updates;
1201static unsigned long n_trc_holdouts;
1202
1203void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1204DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1205		 "RCU Tasks Trace");
1206
1207/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1208static u8 rcu_ld_need_qs(struct task_struct *t)
1209{
1210	smp_mb(); // Enforce full grace-period ordering.
1211	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1212}
1213
1214/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1215static void rcu_st_need_qs(struct task_struct *t, u8 v)
1216{
1217	smp_store_release(&t->trc_reader_special.b.need_qs, v);
1218	smp_mb(); // Enforce full grace-period ordering.
1219}
1220
1221/*
1222 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1223 * the four-byte operand-size restriction of some platforms.
1224 * Returns the old value, which is often ignored.
1225 */
1226u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1227{
1228	union rcu_special ret;
1229	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1230	union rcu_special trs_new = trs_old;
1231
1232	if (trs_old.b.need_qs != old)
1233		return trs_old.b.need_qs;
1234	trs_new.b.need_qs = new;
1235	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1236	return ret.b.need_qs;
1237}
1238EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1239
1240/*
1241 * If we are the last reader, signal the grace-period kthread.
1242 * Also remove from the per-CPU list of blocked tasks.
1243 */
1244void rcu_read_unlock_trace_special(struct task_struct *t)
1245{
1246	unsigned long flags;
1247	struct rcu_tasks_percpu *rtpcp;
1248	union rcu_special trs;
1249
1250	// Open-coded full-word version of rcu_ld_need_qs().
1251	smp_mb(); // Enforce full grace-period ordering.
1252	trs = smp_load_acquire(&t->trc_reader_special);
1253
1254	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1255		smp_mb(); // Pairs with update-side barriers.
1256	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1257	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1258		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1259						       TRC_NEED_QS_CHECKED);
1260
1261		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1262	}
1263	if (trs.b.blocked) {
1264		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1265		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1266		list_del_init(&t->trc_blkd_node);
1267		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1268		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1269	}
1270	WRITE_ONCE(t->trc_reader_nesting, 0);
1271}
1272EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1273
1274/* Add a newly blocked reader task to its CPU's list. */
1275void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1276{
1277	unsigned long flags;
1278	struct rcu_tasks_percpu *rtpcp;
1279
1280	local_irq_save(flags);
1281	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1282	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1283	t->trc_blkd_cpu = smp_processor_id();
1284	if (!rtpcp->rtp_blkd_tasks.next)
1285		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1286	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1287	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1288	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1289}
1290EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1291
1292/* Add a task to the holdout list, if it is not already on the list. */
1293static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1294{
1295	if (list_empty(&t->trc_holdout_list)) {
1296		get_task_struct(t);
1297		list_add(&t->trc_holdout_list, bhp);
1298		n_trc_holdouts++;
1299	}
1300}
1301
1302/* Remove a task from the holdout list, if it is in fact present. */
1303static void trc_del_holdout(struct task_struct *t)
1304{
1305	if (!list_empty(&t->trc_holdout_list)) {
1306		list_del_init(&t->trc_holdout_list);
1307		put_task_struct(t);
1308		n_trc_holdouts--;
1309	}
1310}
1311
1312/* IPI handler to check task state. */
1313static void trc_read_check_handler(void *t_in)
1314{
1315	int nesting;
1316	struct task_struct *t = current;
1317	struct task_struct *texp = t_in;
1318
1319	// If the task is no longer running on this CPU, leave.
1320	if (unlikely(texp != t))
1321		goto reset_ipi; // Already on holdout list, so will check later.
1322
1323	// If the task is not in a read-side critical section, and
1324	// if this is the last reader, awaken the grace-period kthread.
1325	nesting = READ_ONCE(t->trc_reader_nesting);
1326	if (likely(!nesting)) {
1327		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1328		goto reset_ipi;
1329	}
1330	// If we are racing with an rcu_read_unlock_trace(), try again later.
1331	if (unlikely(nesting < 0))
1332		goto reset_ipi;
1333
1334	// Get here if the task is in a read-side critical section.
1335	// Set its state so that it will update state for the grace-period
1336	// kthread upon exit from that critical section.
1337	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1338
1339reset_ipi:
1340	// Allow future IPIs to be sent on CPU and for task.
1341	// Also order this IPI handler against any later manipulations of
1342	// the intended task.
1343	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1344	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1345}
1346
1347/* Callback function for scheduler to check locked-down task.  */
1348static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1349{
1350	struct list_head *bhp = bhp_in;
1351	int cpu = task_cpu(t);
1352	int nesting;
1353	bool ofl = cpu_is_offline(cpu);
1354
1355	if (task_curr(t) && !ofl) {
1356		// If no chance of heavyweight readers, do it the hard way.
1357		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1358			return -EINVAL;
1359
1360		// If heavyweight readers are enabled on the remote task,
1361		// we can inspect its state despite its currently running.
1362		// However, we cannot safely change its state.
1363		n_heavy_reader_attempts++;
1364		// Check for "running" idle tasks on offline CPUs.
1365		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1366			return -EINVAL; // No quiescent state, do it the hard way.
1367		n_heavy_reader_updates++;
1368		nesting = 0;
1369	} else {
1370		// The task is not running, so C-language access is safe.
1371		nesting = t->trc_reader_nesting;
1372		WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
1373		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1374			n_heavy_reader_ofl_updates++;
1375	}
1376
1377	// If not exiting a read-side critical section, mark as checked
1378	// so that the grace-period kthread will remove it from the
1379	// holdout list.
1380	if (!nesting) {
1381		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1382		return 0;  // In QS, so done.
1383	}
1384	if (nesting < 0)
1385		return -EINVAL; // Reader transitioning, try again later.
1386
1387	// The task is in a read-side critical section, so set up its
1388	// state so that it will update state upon exit from that critical
1389	// section.
1390	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1391		trc_add_holdout(t, bhp);
1392	return 0;
1393}
1394
1395/* Attempt to extract the state for the specified task. */
1396static void trc_wait_for_one_reader(struct task_struct *t,
1397				    struct list_head *bhp)
1398{
1399	int cpu;
1400
1401	// If a previous IPI is still in flight, let it complete.
1402	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1403		return;
1404
1405	// The current task had better be in a quiescent state.
1406	if (t == current) {
1407		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1408		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1409		return;
1410	}
1411
1412	// Attempt to nail down the task for inspection.
1413	get_task_struct(t);
1414	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1415		put_task_struct(t);
1416		return;
1417	}
1418	put_task_struct(t);
1419
1420	// If this task is not yet on the holdout list, then we are in
1421	// an RCU read-side critical section.  Otherwise, the invocation of
1422	// trc_add_holdout() that added it to the list did the necessary
1423	// get_task_struct().  Either way, the task cannot be freed out
1424	// from under this code.
1425
1426	// If the task is currently running, send an IPI; either way, add it to the list.
1427	trc_add_holdout(t, bhp);
1428	if (task_curr(t) &&
1429	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1430		// The task is currently running, so try IPIing it.
1431		cpu = task_cpu(t);
1432
1433		// If there is already an IPI outstanding, let it happen.
1434		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1435			return;
1436
1437		per_cpu(trc_ipi_to_cpu, cpu) = true;
1438		t->trc_ipi_to_cpu = cpu;
1439		rcu_tasks_trace.n_ipis++;
1440		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1441			// Just in case there is some other reason for
1442			// failure than the target CPU being offline.
1443			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1444				  __func__, cpu);
1445			rcu_tasks_trace.n_ipis_fails++;
1446			per_cpu(trc_ipi_to_cpu, cpu) = false;
1447			t->trc_ipi_to_cpu = -1;
1448		}
1449	}
1450}
1451
1452/*
1453 * Initialize for first-round processing for the specified task.
1454 * Return false if task is NULL or already taken care of, true otherwise.
1455 */
1456static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1457{
1458	// During early boot when there is only the one boot CPU, there
1459	// is no idle task for the other CPUs.	Also, the grace-period
1460	// kthread is always in a quiescent state.  In addition, just return
1461	// if this task is already on the list.
1462	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1463		return false;
1464
1465	rcu_st_need_qs(t, 0);
1466	t->trc_ipi_to_cpu = -1;
1467	return true;
1468}
1469
1470/* Do first-round processing for the specified task. */
1471static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1472{
1473	if (rcu_tasks_trace_pertask_prep(t, true))
1474		trc_wait_for_one_reader(t, hop);
1475}
1476
1477/* Initialize for a new RCU-tasks-trace grace period. */
1478static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1479{
1480	LIST_HEAD(blkd_tasks);
1481	int cpu;
1482	unsigned long flags;
1483	struct rcu_tasks_percpu *rtpcp;
1484	struct task_struct *t;
1485
1486	// There shouldn't be any old IPIs, but...
1487	for_each_possible_cpu(cpu)
1488		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1489
1490	// Disable CPU hotplug across the CPU scan for the benefit of
1491	// any IPIs that might be needed.  This also waits for all readers
1492	// in CPU-hotplug code paths.
1493	cpus_read_lock();
1494
1495	// These rcu_tasks_trace_pertask_prep() calls are serialized to
1496	// allow safe access to the hop list.
1497	for_each_online_cpu(cpu) {
1498		rcu_read_lock();
1499		t = cpu_curr_snapshot(cpu);
1500		if (rcu_tasks_trace_pertask_prep(t, true))
1501			trc_add_holdout(t, hop);
1502		rcu_read_unlock();
1503		cond_resched_tasks_rcu_qs();
1504	}
1505
1506	// Only after all running tasks have been accounted for is it
1507	// safe to take care of the tasks that have blocked within their
1508	// current RCU tasks trace read-side critical section.
1509	for_each_possible_cpu(cpu) {
1510		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1511		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1512		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1513		while (!list_empty(&blkd_tasks)) {
1514			rcu_read_lock();
1515			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1516			list_del_init(&t->trc_blkd_node);
1517			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1518			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1519			rcu_tasks_trace_pertask(t, hop);
1520			rcu_read_unlock();
1521			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1522		}
1523		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1524		cond_resched_tasks_rcu_qs();
1525	}
1526
1527	// Re-enable CPU hotplug now that the holdout list is populated.
1528	cpus_read_unlock();
1529}
1530
1531/*
1532 * Do intermediate processing between task and holdout scans.
1533 */
1534static void rcu_tasks_trace_postscan(struct list_head *hop)
1535{
1536	// Wait for late-stage exiting tasks to finish exiting.
1537	// These might have passed the call to exit_tasks_rcu_finish().
1538
1539	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1540	synchronize_rcu();
1541	// Any tasks that exit after this point will set
1542	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1543}
1544
1545/* Communicate task state back to the RCU tasks trace stall warning request. */
1546struct trc_stall_chk_rdr {
1547	int nesting;
1548	int ipi_to_cpu;
1549	u8 needqs;
1550};
1551
1552static int trc_check_slow_task(struct task_struct *t, void *arg)
1553{
1554	struct trc_stall_chk_rdr *trc_rdrp = arg;
1555
1556	if (task_curr(t) && cpu_online(task_cpu(t)))
1557		return false; // It is running, so decline to inspect it.
1558	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1559	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1560	trc_rdrp->needqs = rcu_ld_need_qs(t);
1561	return true;
1562}
1563
1564/* Show the state of a task stalling the current RCU tasks trace GP. */
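// In the message below, "I" means an IPI to the task is still outstanding,
// "i" means it is an idle task, "N" means its CPU is nohz_full, and "B"
// means the task has blocked within its current read-side critical section.
// These are followed by the reader nesting depth, a character encoding the
// need-quiescent-state flags, and the task's CPU.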
1565static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1566{
1567	int cpu;
1568	struct trc_stall_chk_rdr trc_rdr;
1569	bool is_idle_tsk = is_idle_task(t);
1570
1571	if (*firstreport) {
1572		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1573		*firstreport = false;
1574	}
1575	cpu = task_cpu(t);
1576	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1577		pr_alert("P%d: %c%c\n",
1578			 t->pid,
1579			 ".I"[t->trc_ipi_to_cpu >= 0],
1580			 ".i"[is_idle_tsk]);
1581	else
1582		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1583			 t->pid,
1584			 ".I"[trc_rdr.ipi_to_cpu >= 0],
1585			 ".i"[is_idle_tsk],
1586			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1587			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1588			 trc_rdr.nesting,
1589			 " !CN"[trc_rdr.needqs & 0x3],
1590			 " ?"[trc_rdr.needqs > 0x3],
1591			 cpu, cpu_online(cpu) ? "" : "(offline)");
1592	sched_show_task(t);
1593}
1594
1595/* List stalled IPIs for RCU tasks trace. */
1596static void show_stalled_ipi_trace(void)
1597{
1598	int cpu;
1599
1600	for_each_possible_cpu(cpu)
1601		if (per_cpu(trc_ipi_to_cpu, cpu))
1602			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1603}
1604
1605/* Do one scan of the holdout list. */
1606static void check_all_holdout_tasks_trace(struct list_head *hop,
1607					  bool needreport, bool *firstreport)
1608{
1609	struct task_struct *g, *t;
1610
1611	// Disable CPU hotplug across the holdout list scan for IPIs.
1612	cpus_read_lock();
1613
1614	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1615		// If safe and needed, try to check the current task.
1616		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1617		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1618			trc_wait_for_one_reader(t, hop);
1619
1620		// If check succeeded, remove this task from the list.
1621		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1622		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1623			trc_del_holdout(t);
1624		else if (needreport)
1625			show_stalled_task_trace(t, firstreport);
1626		cond_resched_tasks_rcu_qs();
1627	}
1628
1629	// Re-enable CPU hotplug now that the holdout list scan has completed.
1630	cpus_read_unlock();
1631
1632	if (needreport) {
1633		if (*firstreport)
1634			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1635		show_stalled_ipi_trace();
1636	}
1637}
1638
1639static void rcu_tasks_trace_empty_fn(void *unused)
1640{
1641}
1642
1643/* Wait for grace period to complete and provide ordering. */
1644static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1645{
1646	int cpu;
1647
1648	// Wait for any lingering IPI handlers to complete.  Note that
1649	// if a CPU has gone offline or transitioned to userspace in the
1650	// meantime, all IPI handlers should have been drained beforehand.
1651	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1652	// changes, there will need to be a recheck and/or timed wait.
1653	for_each_online_cpu(cpu)
1654		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1655			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1656
1657	smp_mb(); // Caller's code must be ordered after wakeup.
1658		  // Pairs with pretty much every ordering primitive.
1659}
1660
1661/* Report any needed quiescent state for this exiting task. */
1662static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1663{
1664	union rcu_special trs = READ_ONCE(t->trc_reader_special);
1665
1666	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1667	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1668	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1669		rcu_read_unlock_trace_special(t);
1670	else
1671		WRITE_ONCE(t->trc_reader_nesting, 0);
1672}
1673
1674/**
1675 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1676 * @rhp: structure to be used for queueing the RCU updates.
1677 * @func: actual callback function to be invoked after the grace period
1678 *
1679 * The callback function will be invoked some time after a trace rcu-tasks
1680 * grace period elapses, in other words after all currently executing
1681 * trace rcu-tasks read-side critical sections have completed. These
1682 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1683 * and rcu_read_unlock_trace().
1684 *
1685 * See the description of call_rcu() for more detailed information on
1686 * memory ordering guarantees.
1687 */
1688void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1689{
1690	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1691}
1692EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
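/*
 * Illustrative sketch (not part of this file; the my_* names are invented
 * and the headers shown are the usual ones rather than a verified minimal
 * set): queueing deferred freeing behind a Tasks Trace grace period.
 *
 *	#include <linux/slab.h>
 *	#include <linux/rcupdate_trace.h>
 *
 *	struct my_trampoline {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void my_trampoline_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp = container_of(rhp, struct my_trampoline, rh);
 *
 *		kfree(tp->insns);
 *		kfree(tp);
 *	}
 *
 *	static void my_trampoline_retire(struct my_trampoline *tp)
 *	{
 *		// Freed only after all pre-existing rcu_read_lock_trace()
 *		// readers have completed.
 *		call_rcu_tasks_trace(&tp->rh, my_trampoline_free_cb);
 *	}
 */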
1693
1694/**
1695 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1696 *
1697 * Control will return to the caller some time after a trace rcu-tasks
1698 * grace period has elapsed, in other words after all currently executing
1699 * trace rcu-tasks read-side critical sections have completed. These read-side
1700 * critical sections are delimited by calls to rcu_read_lock_trace()
1701 * and rcu_read_unlock_trace().
1702 *
1703 * This is a very specialized primitive, intended only for a few uses in
1704 * tracing and other situations requiring manipulation of function preambles
1705 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1706 * (yet) intended for heavy use from multiple CPUs.
1707 *
1708 * See the description of synchronize_rcu() for more detailed information
1709 * on memory ordering guarantees.
1710 */
1711void synchronize_rcu_tasks_trace(void)
1712{
1713	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1714	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1715}
1716EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
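/*
 * Illustrative sketch (not part of this file; my_* names are invented):
 * pairing rcu_read_lock_trace() readers with a synchronize_rcu_tasks_trace()
 * updater.  A production user would normally publish the pointer with
 * rcu_assign_pointer() and load it with rcu_dereference(); plain
 * READ_ONCE()/WRITE_ONCE() are used here only to keep the sketch short.
 *
 *	static void *my_shared;
 *
 *	static void my_reader(void)
 *	{
 *		void *p;
 *
 *		rcu_read_lock_trace();
 *		p = READ_ONCE(my_shared);
 *		if (p)
 *			my_use(p);		// hypothetical helper
 *		rcu_read_unlock_trace();
 *	}
 *
 *	static void my_update(void *newp)
 *	{
 *		void *oldp = my_shared;
 *
 *		WRITE_ONCE(my_shared, newp);
 *		synchronize_rcu_tasks_trace();	// wait out pre-existing readers
 *		kfree(oldp);
 *	}
 */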
1717
1718/**
1719 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1720 *
1721 * Although the current implementation is guaranteed to wait, it is not
1722 * obligated to do so, for example, if there are no pending callbacks.
1723 */
1724void rcu_barrier_tasks_trace(void)
1725{
1726	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1727}
1728EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
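/*
 * Illustrative sketch (not part of this file; my_* names are invented):
 * typical teardown use of rcu_barrier_tasks_trace(), ensuring that every
 * callback already queued via call_rcu_tasks_trace() has been invoked
 * before, for example, module memory disappears.
 *
 *	static void my_exit(void)
 *	{
 *		my_stop_queueing();		// hypothetical: no new callbacks after this
 *		rcu_barrier_tasks_trace();	// wait for already-queued callbacks
 *	}
 */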
1729
1730static int __init rcu_spawn_tasks_trace_kthread(void)
1731{
1732	cblist_init_generic(&rcu_tasks_trace);
1733	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1734		rcu_tasks_trace.gp_sleep = HZ / 10;
1735		rcu_tasks_trace.init_fract = HZ / 10;
1736	} else {
1737		rcu_tasks_trace.gp_sleep = HZ / 200;
1738		if (rcu_tasks_trace.gp_sleep <= 0)
1739			rcu_tasks_trace.gp_sleep = 1;
1740		rcu_tasks_trace.init_fract = HZ / 200;
1741		if (rcu_tasks_trace.init_fract <= 0)
1742			rcu_tasks_trace.init_fract = 1;
1743	}
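	// Note that with HZ below 200 (for example, HZ=100) the divisions
	// above truncate to zero, which the <= 0 checks round up to one jiffy.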
1744	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1745	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1746	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1747	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1748	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1749	return 0;
1750}
1751
1752#if !defined(CONFIG_TINY_RCU)
1753void show_rcu_tasks_trace_gp_kthread(void)
1754{
1755	char buf[64];
1756
1757	sprintf(buf, "N%lu h:%lu/%lu/%lu",
1758		data_race(n_trc_holdouts),
1759		data_race(n_heavy_reader_ofl_updates),
1760		data_race(n_heavy_reader_updates),
1761		data_race(n_heavy_reader_attempts));
1762	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1763}
1764EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1765#endif // !defined(CONFIG_TINY_RCU)
1766
1767#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1768static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1769#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1770
1771#ifndef CONFIG_TINY_RCU
1772void show_rcu_tasks_gp_kthreads(void)
1773{
1774	show_rcu_tasks_classic_gp_kthread();
1775	show_rcu_tasks_rude_gp_kthread();
1776	show_rcu_tasks_trace_gp_kthread();
1777}
1778#endif /* #ifndef CONFIG_TINY_RCU */
1779
1780#ifdef CONFIG_PROVE_RCU
1781struct rcu_tasks_test_desc {
1782	struct rcu_head rh;
1783	const char *name;
1784	bool notrun;
1785	unsigned long runstart;
1786};
1787
1788static struct rcu_tasks_test_desc tests[] = {
1789	{
1790		.name = "call_rcu_tasks()",
1791		/* If not defined, the test is skipped. */
1792		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
1793	},
1794	{
1795		.name = "call_rcu_tasks_rude()",
1796		/* If not defined, the test is skipped. */
1797		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1798	},
1799	{
1800		.name = "call_rcu_tasks_trace()",
1801		/* If not defined, the test is skipped. */
1802		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1803	}
1804};
1805
1806static void test_rcu_tasks_callback(struct rcu_head *rhp)
1807{
1808	struct rcu_tasks_test_desc *rttd =
1809		container_of(rhp, struct rcu_tasks_test_desc, rh);
1810
1811	pr_info("Callback from %s invoked.\n", rttd->name);
1812
1813	rttd->notrun = false;
1814}
1815
1816static void rcu_tasks_initiate_self_tests(void)
1817{
1818	unsigned long j = jiffies;
1819
1820	pr_info("Running RCU-tasks wait API self tests\n");
1821#ifdef CONFIG_TASKS_RCU
1822	tests[0].runstart = j;
1823	synchronize_rcu_tasks();
1824	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1825#endif
1826
1827#ifdef CONFIG_TASKS_RUDE_RCU
1828	tests[1].runstart = j;
1829	synchronize_rcu_tasks_rude();
1830	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1831#endif
1832
1833#ifdef CONFIG_TASKS_TRACE_RCU
1834	tests[2].runstart = j;
1835	synchronize_rcu_tasks_trace();
1836	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1837#endif
1838}
1839
1840/*
1841 * Return:  0 - test passed
1842 *	    1 - test failed, but have not timed out yet
1843 *	   -1 - test failed and timed out
1844 */
1845static int rcu_tasks_verify_self_tests(void)
1846{
1847	int ret = 0;
1848	int i;
1849	unsigned long bst = rcu_task_stall_timeout;
1850
1851	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
1852		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
1853	for (i = 0; i < ARRAY_SIZE(tests); i++) {
1854		while (tests[i].notrun) {		// still hanging.
1855			if (time_after(jiffies, tests[i].runstart + bst)) {
1856				pr_err("%s has failed boot-time tests.\n", tests[i].name);
1857				ret = -1;
1858				break;
1859			}
1860			ret = 1;
1861			break;
1862		}
1863	}
1864	WARN_ON(ret < 0);
1865
1866	return ret;
1867}
1868
1869/*
1870 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
1871 * test passes or has timed out.
1872 */
1873static struct delayed_work rcu_tasks_verify_work;
1874static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
1875{
1876	int ret = rcu_tasks_verify_self_tests();
1877
1878	if (ret <= 0)
1879		return;
1880
1881	/* Test failed but has not timed out yet; reschedule another check. */
1882	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
1883}
1884
1885static int rcu_tasks_verify_schedule_work(void)
1886{
1887	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
1888	rcu_tasks_verify_work_fn(NULL);
1889	return 0;
1890}
1891late_initcall(rcu_tasks_verify_schedule_work);
1892#else /* #ifdef CONFIG_PROVE_RCU */
1893static void rcu_tasks_initiate_self_tests(void) { }
1894#endif /* #else #ifdef CONFIG_PROVE_RCU */
1895
1896void __init rcu_init_tasks_generic(void)
1897{
1898#ifdef CONFIG_TASKS_RCU
1899	rcu_spawn_tasks_kthread();
1900#endif
1901
1902#ifdef CONFIG_TASKS_RUDE_RCU
1903	rcu_spawn_tasks_rude_kthread();
1904#endif
1905
1906#ifdef CONFIG_TASKS_TRACE_RCU
1907	rcu_spawn_tasks_trace_kthread();
1908#endif
1909
1910	// Run the self-tests.
1911	rcu_tasks_initiate_self_tests();
1912}
1913
1914#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1915static inline void rcu_tasks_bootup_oddness(void) {}
1916#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
v6.9.4
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/*
   3 * Task-based RCU implementations.
   4 *
   5 * Copyright (C) 2020 Paul E. McKenney
   6 */
   7
   8#ifdef CONFIG_TASKS_RCU_GENERIC
   9#include "rcu_segcblist.h"
  10
  11////////////////////////////////////////////////////////////////////////
  12//
  13// Generic data structures.
  14
  15struct rcu_tasks;
  16typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  17typedef void (*pregp_func_t)(struct list_head *hop);
  18typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
  19typedef void (*postscan_func_t)(struct list_head *hop);
  20typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
  21typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  22
  23/**
  24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
  25 * @cblist: Callback list.
  26 * @lock: Lock protecting per-CPU callback list.
  27 * @rtp_jiffies: Jiffies counter value for statistics.
  28 * @lazy_timer: Timer to unlazify callbacks.
  29 * @urgent_gp: Number of additional non-lazy grace periods.
  30 * @rtp_n_lock_retries: Rough lock-contention statistic.
  31 * @rtp_work: Work queue for invoking callbacks.
  32 * @rtp_irq_work: IRQ work queue for deferred wakeups.
  33 * @barrier_q_head: RCU callback for barrier operation.
  34 * @rtp_blkd_tasks: List of tasks blocked as readers.
  35 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
  36 * @cpu: CPU number corresponding to this entry.
  37 * @rtpp: Pointer to the rcu_tasks structure.
  38 */
  39struct rcu_tasks_percpu {
  40	struct rcu_segcblist cblist;
  41	raw_spinlock_t __private lock;
  42	unsigned long rtp_jiffies;
  43	unsigned long rtp_n_lock_retries;
  44	struct timer_list lazy_timer;
  45	unsigned int urgent_gp;
  46	struct work_struct rtp_work;
  47	struct irq_work rtp_irq_work;
  48	struct rcu_head barrier_q_head;
  49	struct list_head rtp_blkd_tasks;
  50	struct list_head rtp_exit_list;
  51	int cpu;
  52	struct rcu_tasks *rtpp;
  53};
  54
  55/**
  56 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
  57 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
  58 * @cbs_gbl_lock: Lock protecting callback list.
  59 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
  60 * @gp_func: This flavor's grace-period-wait function.
  61 * @gp_state: Grace period's most recent state transition (debugging).
  62 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
  63 * @init_fract: Initial backoff sleep interval.
  64 * @gp_jiffies: Time of last @gp_state transition.
  65 * @gp_start: Most recent grace-period start in jiffies.
  66 * @tasks_gp_seq: Number of grace periods completed since boot.
  67 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
  68 * @n_ipis_fails: Number of IPI-send failures.
  69 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  70 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
  71 * @pregp_func: This flavor's pre-grace-period function (optional).
  72 * @pertask_func: This flavor's per-task scan function (optional).
  73 * @postscan_func: This flavor's post-task scan function (optional).
  74 * @holdouts_func: This flavor's holdout-list scan function (optional).
  75 * @postgp_func: This flavor's post-grace-period function (optional).
  76 * @call_func: This flavor's call_rcu()-equivalent function.
  77 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
  78 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
  79 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
  80 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
  81 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
  82 * @barrier_q_mutex: Serialize barrier operations.
  83 * @barrier_q_count: Number of queues being waited on.
  84 * @barrier_q_completion: Barrier wait/wakeup mechanism.
  85 * @barrier_q_seq: Sequence number for barrier operations.
  86 * @name: This flavor's textual name.
  87 * @kname: This flavor's kthread name.
  88 */
  89struct rcu_tasks {
  90	struct rcuwait cbs_wait;
  91	raw_spinlock_t cbs_gbl_lock;
  92	struct mutex tasks_gp_mutex;
  93	int gp_state;
  94	int gp_sleep;
  95	int init_fract;
  96	unsigned long gp_jiffies;
  97	unsigned long gp_start;
  98	unsigned long tasks_gp_seq;
  99	unsigned long n_ipis;
 100	unsigned long n_ipis_fails;
 101	struct task_struct *kthread_ptr;
 102	unsigned long lazy_jiffies;
 103	rcu_tasks_gp_func_t gp_func;
 104	pregp_func_t pregp_func;
 105	pertask_func_t pertask_func;
 106	postscan_func_t postscan_func;
 107	holdouts_func_t holdouts_func;
 108	postgp_func_t postgp_func;
 109	call_rcu_func_t call_func;
 110	struct rcu_tasks_percpu __percpu *rtpcpu;
 111	int percpu_enqueue_shift;
 112	int percpu_enqueue_lim;
 113	int percpu_dequeue_lim;
 114	unsigned long percpu_dequeue_gpseq;
 115	struct mutex barrier_q_mutex;
 116	atomic_t barrier_q_count;
 117	struct completion barrier_q_completion;
 118	unsigned long barrier_q_seq;
 119	char *name;
 120	char *kname;
 121};
 122
 123static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
 124
 125#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
 126static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
 127	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
 128	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
 129};											\
 130static struct rcu_tasks rt_name =							\
 131{											\
 132	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
 133	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
 134	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
 135	.gp_func = gp,									\
 136	.call_func = call,								\
 137	.rtpcpu = &rt_name ## __percpu,							\
 138	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
 139	.name = n,									\
 140	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
 141	.percpu_enqueue_lim = 1,							\
 142	.percpu_dequeue_lim = 1,							\
 143	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
 144	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
 145	.kname = #rt_name,								\
 146}
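/*
 * For example, the RCU Tasks flavor later in this file is instantiated as
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * which defines both the per-CPU rcu_tasks__percpu array and the global
 * rcu_tasks structure wired to rcu_tasks_wait_gp() and call_rcu_tasks().
 */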
 147
 148#ifdef CONFIG_TASKS_RCU
 149
 150/* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
 151static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
 152static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
 153#endif
 154
 155/* Avoid IPIing CPUs early in the grace period. */
 156#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
 157static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
 158module_param(rcu_task_ipi_delay, int, 0644);
 159
 160/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
 161#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
 162#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
 163static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 164module_param(rcu_task_stall_timeout, int, 0644);
 165#define RCU_TASK_STALL_INFO (HZ * 10)
 166static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
 167module_param(rcu_task_stall_info, int, 0644);
 168static int rcu_task_stall_info_mult __read_mostly = 3;
 169module_param(rcu_task_stall_info_mult, int, 0444);
 170
 171static int rcu_task_enqueue_lim __read_mostly = -1;
 172module_param(rcu_task_enqueue_lim, int, 0444);
 173
 174static bool rcu_task_cb_adjust;
 175static int rcu_task_contend_lim __read_mostly = 100;
 176module_param(rcu_task_contend_lim, int, 0444);
 177static int rcu_task_collapse_lim __read_mostly = 10;
 178module_param(rcu_task_collapse_lim, int, 0444);
 179static int rcu_task_lazy_lim __read_mostly = 32;
 180module_param(rcu_task_lazy_lim, int, 0444);
 181
 182/* RCU tasks grace-period state for debugging. */
 183#define RTGS_INIT		 0
 184#define RTGS_WAIT_WAIT_CBS	 1
 185#define RTGS_WAIT_GP		 2
 186#define RTGS_PRE_WAIT_GP	 3
 187#define RTGS_SCAN_TASKLIST	 4
 188#define RTGS_POST_SCAN_TASKLIST	 5
 189#define RTGS_WAIT_SCAN_HOLDOUTS	 6
 190#define RTGS_SCAN_HOLDOUTS	 7
 191#define RTGS_POST_GP		 8
 192#define RTGS_WAIT_READERS	 9
 193#define RTGS_INVOKE_CBS		10
 194#define RTGS_WAIT_CBS		11
 195#ifndef CONFIG_TINY_RCU
 196static const char * const rcu_tasks_gp_state_names[] = {
 197	"RTGS_INIT",
 198	"RTGS_WAIT_WAIT_CBS",
 199	"RTGS_WAIT_GP",
 200	"RTGS_PRE_WAIT_GP",
 201	"RTGS_SCAN_TASKLIST",
 202	"RTGS_POST_SCAN_TASKLIST",
 203	"RTGS_WAIT_SCAN_HOLDOUTS",
 204	"RTGS_SCAN_HOLDOUTS",
 205	"RTGS_POST_GP",
 206	"RTGS_WAIT_READERS",
 207	"RTGS_INVOKE_CBS",
 208	"RTGS_WAIT_CBS",
 209};
 210#endif /* #ifndef CONFIG_TINY_RCU */
 211
 212////////////////////////////////////////////////////////////////////////
 213//
 214// Generic code.
 215
 216static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
 217
 218/* Record grace-period phase and time. */
 219static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
 220{
 221	rtp->gp_state = newstate;
 222	rtp->gp_jiffies = jiffies;
 223}
 224
 225#ifndef CONFIG_TINY_RCU
 226/* Return state name. */
 227static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
 228{
 229	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
 230	int j = READ_ONCE(i); // Prevent the compiler from reading twice
 231
 232	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
 233		return "???";
 234	return rcu_tasks_gp_state_names[j];
 235}
 236#endif /* #ifndef CONFIG_TINY_RCU */
 237
 238// Initialize per-CPU callback lists for the specified flavor of
 239// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
 240static void cblist_init_generic(struct rcu_tasks *rtp)
 241{
 242	int cpu;
 243	int lim;
 244	int shift;
 245
 246	if (rcu_task_enqueue_lim < 0) {
 247		rcu_task_enqueue_lim = 1;
 248		rcu_task_cb_adjust = true;
 249	} else if (rcu_task_enqueue_lim == 0) {
 250		rcu_task_enqueue_lim = 1;
 251	}
 252	lim = rcu_task_enqueue_lim;
 253
 254	if (lim > nr_cpu_ids)
 255		lim = nr_cpu_ids;
 256	shift = ilog2(nr_cpu_ids / lim);
 257	if (((nr_cpu_ids - 1) >> shift) >= lim)
 258		shift++;
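	// Worked example: with nr_cpu_ids == 6 and lim == 4, ilog2(6 / 4) == 0,
	// but (5 >> 0) == 5 >= 4, so shift becomes 1 and CPU IDs 0-5 map onto
	// queues 0-2, keeping every enqueue within the first lim queues.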
 259	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
 260	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
 261	smp_store_release(&rtp->percpu_enqueue_lim, lim);
 262	for_each_possible_cpu(cpu) {
 263		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 264
 265		WARN_ON_ONCE(!rtpcp);
 266		if (cpu)
 267			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
 268		if (rcu_segcblist_empty(&rtpcp->cblist))
 269			rcu_segcblist_init(&rtpcp->cblist);
 270		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
 271		rtpcp->cpu = cpu;
 272		rtpcp->rtpp = rtp;
 273		if (!rtpcp->rtp_blkd_tasks.next)
 274			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
 275		if (!rtpcp->rtp_exit_list.next)
 276			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
 277	}
 278
 279	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
 280			data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
 281}
 282
 283// Compute wakeup time for lazy callback timer.
 284static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
 285{
 286	return jiffies + rtp->lazy_jiffies;
 287}
 288
 289// Timer handler that unlazifies lazy callbacks.
 290static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
 291{
 292	unsigned long flags;
 293	bool needwake = false;
 294	struct rcu_tasks *rtp;
 295	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
 296
 297	rtp = rtpcp->rtpp;
 298	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 299	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
 300		if (!rtpcp->urgent_gp)
 301			rtpcp->urgent_gp = 1;
 302		needwake = true;
 303		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
 304	}
 305	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 306	if (needwake)
 307		rcuwait_wake_up(&rtp->cbs_wait);
 308}
 309
 310// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
 311static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
 312{
 313	struct rcu_tasks *rtp;
 314	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
 315
 316	rtp = rtpcp->rtpp;
 317	rcuwait_wake_up(&rtp->cbs_wait);
 318}
 319
 320// Enqueue a callback for the specified flavor of Tasks RCU.
 321static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 322				   struct rcu_tasks *rtp)
 323{
 324	int chosen_cpu;
 325	unsigned long flags;
 326	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
 327	int ideal_cpu;
 328	unsigned long j;
 329	bool needadjust = false;
 330	bool needwake;
 331	struct rcu_tasks_percpu *rtpcp;
 332
 333	rhp->next = NULL;
 334	rhp->func = func;
 335	local_irq_save(flags);
 336	rcu_read_lock();
 337	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
 338	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
 339	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
 340	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
 341		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
 342		j = jiffies;
 343		if (rtpcp->rtp_jiffies != j) {
 344			rtpcp->rtp_jiffies = j;
 345			rtpcp->rtp_n_lock_retries = 0;
 346		}
 347		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
 348		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
 349			needadjust = true;  // Defer adjustment to avoid deadlock.
 350	}
 351	// Queuing callbacks before initialization not yet supported.
 352	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
 353		rcu_segcblist_init(&rtpcp->cblist);
 354	needwake = (func == wakeme_after_rcu) ||
 355		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
 356	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
 357		if (rtp->lazy_jiffies)
 358			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
 359		else
 360			needwake = rcu_segcblist_empty(&rtpcp->cblist);
 361	}
 362	if (needwake)
 363		rtpcp->urgent_gp = 3;
 364	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
 365	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 366	if (unlikely(needadjust)) {
 367		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 368		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
 369			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
 370			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
 371			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
 372			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
 373		}
 374		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 375	}
 376	rcu_read_unlock();
 377	/* We can't create the thread unless interrupts are enabled. */
 378	if (needwake && READ_ONCE(rtp->kthread_ptr))
 379		irq_work_queue(&rtpcp->rtp_irq_work);
 380}
 381
 382// RCU callback function for rcu_barrier_tasks_generic().
 383static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
 384{
 385	struct rcu_tasks *rtp;
 386	struct rcu_tasks_percpu *rtpcp;
 387
 388	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
 389	rtp = rtpcp->rtpp;
 390	if (atomic_dec_and_test(&rtp->barrier_q_count))
 391		complete(&rtp->barrier_q_completion);
 392}
 393
 394// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
 395// Operates in a manner similar to rcu_barrier().
 396static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
 397{
 398	int cpu;
 399	unsigned long flags;
 400	struct rcu_tasks_percpu *rtpcp;
 401	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
 402
 403	mutex_lock(&rtp->barrier_q_mutex);
 404	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
 405		smp_mb();
 406		mutex_unlock(&rtp->barrier_q_mutex);
 407		return;
 408	}
 409	rcu_seq_start(&rtp->barrier_q_seq);
 410	init_completion(&rtp->barrier_q_completion);
 411	atomic_set(&rtp->barrier_q_count, 2);
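	// The initial count of 2 is a bias that keeps the completion from
	// firing while callbacks are still being entrained below; the matching
	// atomic_sub_and_test(2, ...) drops that bias once every eligible
	// per-CPU queue has been visited.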
 412	for_each_possible_cpu(cpu) {
 413		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
 414			break;
 415		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 416		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
 417		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 418		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
 419			atomic_inc(&rtp->barrier_q_count);
 420		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 421	}
 422	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
 423		complete(&rtp->barrier_q_completion);
 424	wait_for_completion(&rtp->barrier_q_completion);
 425	rcu_seq_end(&rtp->barrier_q_seq);
 426	mutex_unlock(&rtp->barrier_q_mutex);
 427}
 428
 429// Advance callbacks and indicate whether either a grace period or
 430// callback invocation is needed.
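// The return value is a bitmask: 0x1 indicates that callbacks are ready to
// be invoked, and 0x2 indicates that a new grace period is needed.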
 431static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 432{
 433	int cpu;
 434	int dequeue_limit;
 435	unsigned long flags;
 436	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
 437	long n;
 438	long ncbs = 0;
 439	long ncbsnz = 0;
 440	int needgpcb = 0;
 441
 442	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
 443	for (cpu = 0; cpu < dequeue_limit; cpu++) {
 444		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 445
 446		/* Advance and accelerate any new callbacks. */
 447		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
 448			continue;
 449		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 450		// Should we shrink down to a single callback queue?
 451		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
 452		if (n) {
 453			ncbs += n;
 454			if (cpu > 0)
 455				ncbsnz += n;
 456		}
 457		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 458		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 459		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
 460			if (rtp->lazy_jiffies)
 461				rtpcp->urgent_gp--;
 462			needgpcb |= 0x3;
 463		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
 464			rtpcp->urgent_gp = 0;
 465		}
 466		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
 467			needgpcb |= 0x1;
 468		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 469	}
 470
 471	// Shrink down to a single callback queue if appropriate.
 472	// This is done in two stages: (1) If there are no more than
 473	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
 474	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
 475	// if there has not been an increase in callbacks, limit dequeuing
 476	// to CPU 0.  Note the matching RCU read-side critical section in
 477	// call_rcu_tasks_generic().
 478	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 479		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 480		if (rtp->percpu_enqueue_lim > 1) {
 481			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
 482			smp_store_release(&rtp->percpu_enqueue_lim, 1);
 483			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 484			gpdone = false;
 485			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
 486		}
 487		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 488	}
 489	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
 490		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 491		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
 492			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
 493			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
 494		}
 495		if (rtp->percpu_dequeue_lim == 1) {
 496			for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
 497				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 498
 499				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
 500			}
 501		}
 502		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 503	}
 504
 505	return needgpcb;
 506}
 507
 508// Advance callbacks and invoke any that are ready.
 509static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
 510{
 511	int cpu;
 512	int cpunext;
 513	int cpuwq;
 514	unsigned long flags;
 515	int len;
 516	struct rcu_head *rhp;
 517	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 518	struct rcu_tasks_percpu *rtpcp_next;
 519
 520	cpu = rtpcp->cpu;
 521	cpunext = cpu * 2 + 1;
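	// Fan out callback invocation in a binary tree: this CPU's worker
	// kicks the workers for CPUs 2*cpu+1 and 2*cpu+2 (when below the
	// dequeue limit), so CPU 0 kicks CPUs 1 and 2, CPU 1 kicks 3 and 4,
	// and so on.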
 522	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 523		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 524		cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
 525		queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
 526		cpunext++;
 527		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
 528			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 529			cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
 530			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
 531		}
 532	}
 533
 534	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
 535		return;
 536	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 537	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 538	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
 539	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 540	len = rcl.len;
 541	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
 542		debug_rcu_head_callback(rhp);
 543		local_bh_disable();
 544		rhp->func(rhp);
 545		local_bh_enable();
 546		cond_resched();
 547	}
 548	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 549	rcu_segcblist_add_len(&rtpcp->cblist, -len);
 550	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
 551	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 552}
 553
 554// Workqueue flood to advance callbacks and invoke any that are ready.
 555static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
 556{
 557	struct rcu_tasks *rtp;
 558	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
 559
 560	rtp = rtpcp->rtpp;
 561	rcu_tasks_invoke_cbs(rtp, rtpcp);
 562}
 563
 564// Wait for one grace period.
 565static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
 566{
 567	int needgpcb;
 568
 569	mutex_lock(&rtp->tasks_gp_mutex);
 570
 571	// If there were none, wait a bit and start over.
 572	if (unlikely(midboot)) {
 573		needgpcb = 0x2;
 574	} else {
 575		mutex_unlock(&rtp->tasks_gp_mutex);
 576		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 577		rcuwait_wait_event(&rtp->cbs_wait,
 578				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
 579				   TASK_IDLE);
 580		mutex_lock(&rtp->tasks_gp_mutex);
 581	}
 582
 583	if (needgpcb & 0x2) {
 584		// Wait for one grace period.
 585		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
 586		rtp->gp_start = jiffies;
 587		rcu_seq_start(&rtp->tasks_gp_seq);
 588		rtp->gp_func(rtp);
 589		rcu_seq_end(&rtp->tasks_gp_seq);
 590	}
 591
 592	// Invoke callbacks.
 593	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
 594	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
 595	mutex_unlock(&rtp->tasks_gp_mutex);
 596}
 597
 598// RCU-tasks kthread that detects grace periods and invokes callbacks.
 599static int __noreturn rcu_tasks_kthread(void *arg)
 600{
 601	int cpu;
 602	struct rcu_tasks *rtp = arg;
 603
 604	for_each_possible_cpu(cpu) {
 605		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 606
 607		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
 608		rtpcp->urgent_gp = 1;
 609	}
 610
 611	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
 612	housekeeping_affine(current, HK_TYPE_RCU);
 613	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
 614
 615	/*
 616	 * Each pass through the following loop makes one check for
 617	 * newly arrived callbacks, and, if there are some, waits for
 618	 * one RCU-tasks grace period and then invokes the callbacks.
 619	 * This loop is terminated by the system going down.  ;-)
 620	 */
 621	for (;;) {
 622		// Wait for one grace period and invoke any callbacks
 623		// that are ready.
 624		rcu_tasks_one_gp(rtp, false);
 625
 626		// Paranoid sleep to keep this from entering a tight loop.
 627		schedule_timeout_idle(rtp->gp_sleep);
 628	}
 629}
 630
 631// Wait for a grace period for the specified flavor of Tasks RCU.
 632static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 633{
 634	/* Complain if the scheduler has not started.  */
 635	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
 636			 "synchronize_%s() called too soon", rtp->name))
 637		return;
 638
 639	// If the grace-period kthread is running, use it.
 640	if (READ_ONCE(rtp->kthread_ptr)) {
 641		wait_rcu_gp(rtp->call_func);
 642		return;
 643	}
 644	rcu_tasks_one_gp(rtp, true);
 645}
 646
 647/* Spawn RCU-tasks grace-period kthread. */
 648static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 649{
 650	struct task_struct *t;
 651
 652	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
 653	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
 654		return;
 655	smp_mb(); /* Ensure others see full kthread. */
 656}
 657
 658#ifndef CONFIG_TINY_RCU
 659
 660/*
 661 * Print any non-default Tasks RCU settings.
 662 */
 663static void __init rcu_tasks_bootup_oddness(void)
 664{
 665#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 666	int rtsimc;
 667
 668	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
 669		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
 670	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
 671	if (rtsimc != rcu_task_stall_info_mult) {
 672		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
 673		rcu_task_stall_info_mult = rtsimc;
 674	}
 675#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 676#ifdef CONFIG_TASKS_RCU
 677	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
 678#endif /* #ifdef CONFIG_TASKS_RCU */
 679#ifdef CONFIG_TASKS_RUDE_RCU
 680	pr_info("\tRude variant of Tasks RCU enabled.\n");
 681#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
 682#ifdef CONFIG_TASKS_TRACE_RCU
 683	pr_info("\tTracing variant of Tasks RCU enabled.\n");
 684#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 685}
 686
 687#endif /* #ifndef CONFIG_TINY_RCU */
 688
 689#ifndef CONFIG_TINY_RCU
 690/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
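// In the status line, "k" means the grace-period kthread has been spawned,
// "C" means at least one CPU has queued callbacks, "u" means at least one
// CPU has a nonzero urgent_gp count, and "U" means some CPU has both.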
 691static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
 692{
 693	int cpu;
 694	bool havecbs = false;
 695	bool haveurgent = false;
 696	bool haveurgentcbs = false;
 697
 698	for_each_possible_cpu(cpu) {
 699		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 700
 701		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
 702			havecbs = true;
 703		if (data_race(rtpcp->urgent_gp))
 704			haveurgent = true;
 705		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
 706			haveurgentcbs = true;
 707		if (havecbs && haveurgent && haveurgentcbs)
 708			break;
 709	}
 710	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
 711		rtp->kname,
 712		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
 713		jiffies - data_race(rtp->gp_jiffies),
 714		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
 715		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
 716		".k"[!!data_race(rtp->kthread_ptr)],
 717		".C"[havecbs],
 718		".u"[haveurgent],
 719		".U"[haveurgentcbs],
 720		rtp->lazy_jiffies,
 721		s);
 722}
 723#endif // #ifndef CONFIG_TINY_RCU
 724
 725static void exit_tasks_rcu_finish_trace(struct task_struct *t);
 726
 727#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
 728
 729////////////////////////////////////////////////////////////////////////
 730//
 731// Shared code between task-list-scanning variants of Tasks RCU.
 732
 733/* Wait for one RCU-tasks grace period. */
 734static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 735{
 736	struct task_struct *g;
 737	int fract;
 738	LIST_HEAD(holdouts);
 739	unsigned long j;
 740	unsigned long lastinfo;
 741	unsigned long lastreport;
 742	bool reported = false;
 743	int rtsi;
 744	struct task_struct *t;
 745
 746	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
 747	rtp->pregp_func(&holdouts);
 748
 749	/*
 750	 * There were callbacks, so we need to wait for an RCU-tasks
 751	 * grace period.  Start off by scanning the task list for tasks
 752	 * that are not already voluntarily blocked.  Mark these tasks
 753	 * and make a list of them in holdouts.
 754	 */
 755	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
 756	if (rtp->pertask_func) {
 757		rcu_read_lock();
 758		for_each_process_thread(g, t)
 759			rtp->pertask_func(t, &holdouts);
 760		rcu_read_unlock();
 761	}
 762
 763	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
 764	rtp->postscan_func(&holdouts);
 765
 766	/*
 767	 * Each pass through the following loop scans the list of holdout
 768	 * tasks, removing any that are no longer holdouts.  When the list
 769	 * is empty, we are done.
 770	 */
 771	lastreport = jiffies;
 772	lastinfo = lastreport;
 773	rtsi = READ_ONCE(rcu_task_stall_info);
 774
 775	// Start off with initial wait and slowly back off to 1 HZ wait.
 776	fract = rtp->init_fract;
 777
 778	while (!list_empty(&holdouts)) {
 779		ktime_t exp;
 780		bool firstreport;
 781		bool needreport;
 782		int rtst;
 783
 784		// Slowly back off waiting for holdouts
 785		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
 786		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 787			schedule_timeout_idle(fract);
 788		} else {
 789			exp = jiffies_to_nsecs(fract);
 790			__set_current_state(TASK_IDLE);
 791			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
 792		}
 793
 794		if (fract < HZ)
 795			fract++;
 796
 797		rtst = READ_ONCE(rcu_task_stall_timeout);
 798		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
 799		if (needreport) {
 800			lastreport = jiffies;
 801			reported = true;
 802		}
 803		firstreport = true;
 804		WARN_ON(signal_pending(current));
 805		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
 806		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 807
 808		// Print pre-stall informational messages if needed.
 809		j = jiffies;
 810		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
 811			lastinfo = j;
 812			rtsi = rtsi * rcu_task_stall_info_mult;
 813			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
 814				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
 815		}
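		// With the default 10-second rcu_task_stall_info and the
		// default multiplier of 3, these lines appear roughly 10, 40,
		// and 130 seconds into an overly long grace period, and stop
		// once a full stall warning has been reported.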
 816	}
 817
 818	set_tasks_gp_state(rtp, RTGS_POST_GP);
 819	rtp->postgp_func(rtp);
 820}
 821
 822#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
 823
 824#ifdef CONFIG_TASKS_RCU
 825
 826////////////////////////////////////////////////////////////////////////
 827//
 828// Simple variant of RCU whose quiescent states are voluntary context
 829// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
 830// As such, grace periods can take one good long time.  There are no
 831// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 832// because this implementation is intended to get the system into a safe
 833// state for some of the manipulations involved in tracing and the like.
 834// Finally, this implementation does not support high call_rcu_tasks()
 835// rates from multiple CPUs.  If this is required, per-CPU callback lists
 836// will be needed.
 837//
 838// The implementation uses rcu_tasks_wait_gp(), which relies on function
 839// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
 840// function sets these function pointers up so that rcu_tasks_wait_gp()
 841// invokes these functions in this order:
 842//
 843// rcu_tasks_pregp_step():
 844//	Invokes synchronize_rcu() in order to wait for all in-flight
 845//	t->on_rq and t->nvcsw transitions to complete.	This works because
 846//	all such transitions are carried out with interrupts disabled.
 847// rcu_tasks_pertask(), invoked on every non-idle task:
 848//	For every runnable non-idle task other than the current one, use
 849//	get_task_struct() to pin down that task, snapshot that task's
 850//	number of voluntary context switches, and add that task to the
 851//	holdout list.
 852// rcu_tasks_postscan():
 853//	Gather per-CPU lists of tasks in do_exit() to ensure that all
 854//	tasks that were in the process of exiting (and which thus might
 855//	not know to synchronize with this RCU Tasks grace period) have
 856//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
 857//	will take care of any tasks stuck in the non-preemptible region
 858//	of do_exit() following its call to exit_tasks_rcu_stop().
 859// check_all_holdout_tasks(), repeatedly until holdout list is empty:
 860//	Scans the holdout list, attempting to identify a quiescent state
 861//	for each task on the list.  If there is a quiescent state, the
 862//	corresponding task is removed from the holdout list.
 863// rcu_tasks_postgp():
 864//	Invokes synchronize_rcu() in order to ensure that all prior
 865//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
 866//	to have happened before the end of this RCU Tasks grace period.
 867//	Again, this works because all such transitions are carried out
 868//	with interrupts disabled.
 869//
 870// For each exiting task, the exit_tasks_rcu_start() and
 871// exit_tasks_rcu_finish() functions add and remove, respectively, the
 872// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
 873// wait on.  This is necessary because rcu_tasks_postscan() must wait on
 874// tasks that have already been removed from the global list of tasks.
 875//
 876// Pre-grace-period update-side code is ordered before the grace period
 877// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
 878// is ordered before the grace period via synchronize_rcu() call in
 879// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
 880// disabling.
 881
 882/* Pre-grace-period preparation. */
 883static void rcu_tasks_pregp_step(struct list_head *hop)
 884{
 885	/*
 886	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
 887	 * to complete.  Invoking synchronize_rcu() suffices because all
 888	 * these transitions occur with interrupts disabled.  Without this
 889	 * synchronize_rcu(), a read-side critical section that started
 890	 * before the grace period might be incorrectly seen as having
 891	 * started after the grace period.
 892	 *
 893	 * This synchronize_rcu() also dispenses with the need for a
 894	 * memory barrier on the first store to t->rcu_tasks_holdout,
 895	 * as it forces the store to happen after the beginning of the
 896	 * grace period.
 897	 */
 898	synchronize_rcu();
 899}
 900
 901/* Check for quiescent states since the pregp's synchronize_rcu() */
 902static bool rcu_tasks_is_holdout(struct task_struct *t)
 903{
 904	int cpu;
 905
 906	/* Has the task been seen voluntarily sleeping? */
 907	if (!READ_ONCE(t->on_rq))
 908		return false;
 909
 910	/*
 911	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
 912	 * quiescent states. But CPU boot code performed by the idle task
 913	 * isn't a quiescent state.
 914	 */
 915	if (is_idle_task(t))
 916		return false;
 917
 918	cpu = task_cpu(t);
 919
 920	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
 921	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
 922		return false;
 923
 924	return true;
 925}
 926
 927/* Per-task initial processing. */
 928static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 929{
 930	if (t != current && rcu_tasks_is_holdout(t)) {
 931		get_task_struct(t);
 932		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 933		WRITE_ONCE(t->rcu_tasks_holdout, true);
 934		list_add(&t->rcu_tasks_holdout_list, hop);
 935	}
 936}
 937
 938void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 939DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 940
 941/* Processing between scanning the tasklist and draining the holdout list. */
 942static void rcu_tasks_postscan(struct list_head *hop)
 943{
 944	int cpu;
 945	int rtsi = READ_ONCE(rcu_task_stall_info);
 946
 947	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
 948		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
 949		add_timer(&tasks_rcu_exit_srcu_stall_timer);
 950	}
 951
 952	/*
 953	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
 954	 * until their final schedule() with TASK_DEAD state. To cope with
 955	 * this, divide the fragile part of the exit path into two
 956	 * intersecting read-side critical sections:
 957	 *
 958	 * 1) A task_struct list addition before calling exit_notify(),
 959	 *    which may remove the task from the tasklist, with the
 960	 *    removal after the final preempt_disable() call in do_exit().
 961	 *
 962	 * 2) An _RCU_ read side starting with the final preempt_disable()
 963	 *    call in do_exit() and ending with the final call to schedule()
 964	 *    with TASK_DEAD state.
 965	 *
 966	 * This handles the part 1). And postgp will handle part 2) with a
 967	 * call to synchronize_rcu().
 968	 */
 969
 970	for_each_possible_cpu(cpu) {
 971		unsigned long j = jiffies + 1;
 972		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
 973		struct task_struct *t;
 974		struct task_struct *t1;
 975		struct list_head tmp;
 976
 977		raw_spin_lock_irq_rcu_node(rtpcp);
 978		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
 979			if (list_empty(&t->rcu_tasks_holdout_list))
 980				rcu_tasks_pertask(t, hop);
 981
 982			// RT kernels need frequent pauses, otherwise
 983			// pause at least once per pair of jiffies.
 984			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
 985				continue;
 986
 987			// Keep our place in the list while pausing.
 988			// Nothing else traverses this list, so adding a
 989			// bare list_head is OK.
 990			list_add(&tmp, &t->rcu_tasks_exit_list);
 991			raw_spin_unlock_irq_rcu_node(rtpcp);
 992			cond_resched(); // For CONFIG_PREEMPT=n kernels
 993			raw_spin_lock_irq_rcu_node(rtpcp);
 994			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
 995			list_del(&tmp);
 996			j = jiffies + 1;
 997		}
 998		raw_spin_unlock_irq_rcu_node(rtpcp);
 999	}
1000
1001	if (!IS_ENABLED(CONFIG_TINY_RCU))
1002		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
1003}
1004
1005/* See if tasks are still holding out, complain if so. */
1006static void check_holdout_task(struct task_struct *t,
1007			       bool needreport, bool *firstreport)
1008{
1009	int cpu;
1010
1011	if (!READ_ONCE(t->rcu_tasks_holdout) ||
1012	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1013	    !rcu_tasks_is_holdout(t) ||
1014	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
1015	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
1016		WRITE_ONCE(t->rcu_tasks_holdout, false);
1017		list_del_init(&t->rcu_tasks_holdout_list);
1018		put_task_struct(t);
1019		return;
1020	}
1021	rcu_request_urgent_qs_task(t);
1022	if (!needreport)
1023		return;
1024	if (*firstreport) {
1025		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1026		*firstreport = false;
1027	}
1028	cpu = task_cpu(t);
1029	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1030		 t, ".I"[is_idle_task(t)],
1031		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1032		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1033		 data_race(t->rcu_tasks_idle_cpu), cpu);
1034	sched_show_task(t);
1035}
1036
1037/* Scan the holdout lists for tasks no longer holding out. */
1038static void check_all_holdout_tasks(struct list_head *hop,
1039				    bool needreport, bool *firstreport)
1040{
1041	struct task_struct *t, *t1;
1042
1043	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1044		check_holdout_task(t, needreport, firstreport);
1045		cond_resched();
1046	}
1047}
1048
1049/* Finish off the Tasks-RCU grace period. */
1050static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1051{
1052	/*
1053	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
1054	 * memory barriers prior to them in the schedule() path, memory
1055	 * reordering on other CPUs could cause their RCU-tasks read-side
1056	 * critical sections to extend past the end of the grace period.
1057	 * However, because these ->nvcsw updates are carried out with
1058	 * interrupts disabled, we can use synchronize_rcu() to force the
1059	 * needed ordering on all such CPUs.
1060	 *
1061	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1062	 * accesses to be within the grace period, avoiding the need for
1063	 * memory barriers for ->rcu_tasks_holdout accesses.
1064	 *
1065	 * In addition, this synchronize_rcu() waits for exiting tasks
1066	 * to complete their final preempt_disable() region of execution,
1067	 * which ensures that the whole region from tasklist removal
1068	 * until the final schedule() with TASK_DEAD state acts as an
1069	 * RCU Tasks read-side critical section.
1070	 */
1071	synchronize_rcu();
1072}
1073
1074static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1075{
1076#ifndef CONFIG_TINY_RCU
1077	int rtsi;
1078
1079	rtsi = READ_ONCE(rcu_task_stall_info);
1080	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1081		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1082		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1083	pr_info("Please check for exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1084	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1085	add_timer(&tasks_rcu_exit_srcu_stall_timer);
1086#endif // #ifndef CONFIG_TINY_RCU
1087}
1088
1089/**
1090 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1091 * @rhp: structure to be used for queueing the RCU updates.
1092 * @func: actual callback function to be invoked after the grace period
1093 *
1094 * The callback function will be invoked some time after a full grace
1095 * period elapses, in other words after all currently executing RCU
1096 * read-side critical sections have completed. call_rcu_tasks() assumes
1097 * that the read-side critical sections end at a voluntary context
1098 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1099 * or transition to usermode execution.  As such, there are no read-side
1100 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1101 * this primitive is intended to determine that all tasks have passed
1102 * through a safe state, not so much for data-structure synchronization.
1103 *
1104 * See the description of call_rcu() for more detailed information on
1105 * memory ordering guarantees.
1106 */
1107void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1108{
1109	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1110}
1111EXPORT_SYMBOL_GPL(call_rcu_tasks);
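/*
 * Illustrative sketch (not part of this file): an updater might use
 * call_rcu_tasks() to defer freeing a tracing trampoline until all
 * tasks have passed through a quiescent state.  The structure and the
 * unpatch_call_sites() helper below are hypothetical:
 *
 *	struct tramp {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct tramp *tp = container_of(rhp, struct tramp, rh);
 *
 *		vfree(tp->insns);
 *		kfree(tp);
 *	}
 *
 *	static void tramp_retire(struct tramp *tp)
 *	{
 *		unpatch_call_sites(tp);		// No new entries into tp->insns.
 *		call_rcu_tasks(&tp->rh, tramp_free_cb);	// Stragglers drain first.
 *	}
 */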
1112
1113/**
1114 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1115 *
1116 * Control will return to the caller some time after a full rcu-tasks
1117 * grace period has elapsed, in other words after all currently
1118 * executing rcu-tasks read-side critical sections have completed.  These
1119 * read-side critical sections are delimited by calls to schedule(),
1120 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1121 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1122 *
1123 * This is a very specialized primitive, intended only for a few uses in
1124 * tracing and other situations requiring manipulation of function
1125 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
1126 * is not (yet) intended for heavy use from multiple CPUs.
1127 *
1128 * See the description of synchronize_rcu() for more detailed information
1129 * on memory ordering guarantees.
1130 */
1131void synchronize_rcu_tasks(void)
1132{
1133	synchronize_rcu_tasks_generic(&rcu_tasks);
1134}
1135EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
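/*
 * Illustrative sketch: the blocking counterpart of the call_rcu_tasks()
 * example above, for callers that can afford to sleep (helper name
 * hypothetical):
 *
 *	static void tramp_retire_sync(struct tramp *tp)
 *	{
 *		unpatch_call_sites(tp);		// No new entries into tp->insns.
 *		synchronize_rcu_tasks();	// Wait out any stragglers.
 *		vfree(tp->insns);
 *		kfree(tp);
 *	}
 */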
1136
1137/**
1138 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1139 *
1140 * Although the current implementation is guaranteed to wait, it is not
1141 * obligated to do so if, for example, there are no pending callbacks.
1142 */
1143void rcu_barrier_tasks(void)
1144{
1145	rcu_barrier_tasks_generic(&rcu_tasks);
1146}
1147EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
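/*
 * Illustrative sketch: a module whose callbacks were queued via
 * call_rcu_tasks() must wait for them before its code can be unloaded,
 * for example from a (hypothetical) module-exit handler:
 *
 *	static void __exit my_trace_exit(void)
 *	{
 *		deregister_my_hooks();	// Hypothetical: stop queueing callbacks.
 *		rcu_barrier_tasks();	// Wait for already-queued callbacks.
 *	}
 */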
1148
1149static int rcu_tasks_lazy_ms = -1;
1150module_param(rcu_tasks_lazy_ms, int, 0444);
1151
1152static int __init rcu_spawn_tasks_kthread(void)
1153{
1154	rcu_tasks.gp_sleep = HZ / 10;
1155	rcu_tasks.init_fract = HZ / 10;
1156	if (rcu_tasks_lazy_ms >= 0)
1157		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1158	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1159	rcu_tasks.pertask_func = rcu_tasks_pertask;
1160	rcu_tasks.postscan_func = rcu_tasks_postscan;
1161	rcu_tasks.holdouts_func = check_all_holdout_tasks;
1162	rcu_tasks.postgp_func = rcu_tasks_postgp;
1163	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1164	return 0;
1165}
1166
1167#if !defined(CONFIG_TINY_RCU)
1168void show_rcu_tasks_classic_gp_kthread(void)
1169{
1170	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1171}
1172EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1173#endif // !defined(CONFIG_TINY_RCU)
1174
1175struct task_struct *get_rcu_tasks_gp_kthread(void)
1176{
1177	return rcu_tasks.kthread_ptr;
1178}
1179EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1180
1181/*
1182 * Protect against tasklist scan blind spot while the task is exiting and
1183 * may be removed from the tasklist.  Do this by adding the task to yet
1184 * another list.
1185 *
1186 * Note that the task will remove itself from this list, so there is no
1187 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
1188 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
1189 * the needed get_task_struct().
1190 */
1191void exit_tasks_rcu_start(void)
1192{
1193	unsigned long flags;
1194	struct rcu_tasks_percpu *rtpcp;
1195	struct task_struct *t = current;
1196
1197	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
1198	preempt_disable();
1199	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
1200	t->rcu_tasks_exit_cpu = smp_processor_id();
1201	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1202	if (!rtpcp->rtp_exit_list.next)
1203		INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
1204	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
1205	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1206	preempt_enable();
1207}
1208
1209/*
1210 * Remove the task from the "yet another list" because do_exit() is now
1211 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
1212 */
1213void exit_tasks_rcu_stop(void)
1214{
1215	unsigned long flags;
1216	struct rcu_tasks_percpu *rtpcp;
1217	struct task_struct *t = current;
1218
1219	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
1220	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
1221	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1222	list_del_init(&t->rcu_tasks_exit_list);
1223	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1224}
1225
1226/*
1227 * Help protect against the tasklist-scan blind spot while the
1228 * task is exiting and may be removed from the tasklist.  See the
1229 * corresponding synchronize_rcu() in rcu_tasks_postgp() for details.
1230 */
1231void exit_tasks_rcu_finish(void)
1232{
1233	exit_tasks_rcu_stop();
1234	exit_tasks_rcu_finish_trace(current);
1235}
1236
1237#else /* #ifdef CONFIG_TASKS_RCU */
1238void exit_tasks_rcu_start(void) { }
1239void exit_tasks_rcu_stop(void) { }
1240void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1241#endif /* #else #ifdef CONFIG_TASKS_RCU */
1242
1243#ifdef CONFIG_TASKS_RUDE_RCU
1244
1245////////////////////////////////////////////////////////////////////////
1246//
1247// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1248// passing an empty function to schedule_on_each_cpu().  This approach
1249// provides an asynchronous call_rcu_tasks_rude() API and batching of
1250// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1251// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1252// and induces otherwise unnecessary context switches on all online CPUs,
1253// whether idle or not.
1254//
1255// Callback handling is provided by the rcu_tasks_kthread() function.
1256//
1257// Ordering is provided by the scheduler's context-switch code.
1258
1259// Empty function to allow workqueues to force a context switch.
1260static void rcu_tasks_be_rude(struct work_struct *work)
1261{
1262}
1263
1264// Wait for one rude RCU-tasks grace period.
1265static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1266{
1267	rtp->n_ipis += cpumask_weight(cpu_online_mask);
1268	schedule_on_each_cpu(rcu_tasks_be_rude);
1269}
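/*
 * Queuing the empty function above on every CPU via schedule_on_each_cpu()
 * forces at least one context switch on each online CPU, which is what
 * constitutes this flavor's grace period.  A minimal sketch of the trick:
 *
 *	static void noop_work_fn(struct work_struct *unused) { }
 *
 *	schedule_on_each_cpu(noop_work_fn);	// Returns only after every
 *						// online CPU has switched
 *						// to a workqueue kthread.
 */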
1270
1271void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1272DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1273		 "RCU Tasks Rude");
1274
1275/**
1276 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1277 * @rhp: structure to be used for queueing the RCU updates.
1278 * @func: actual callback function to be invoked after the grace period
1279 *
1280 * The callback function will be invoked some time after a full grace
1281 * period elapses, in other words after all currently executing RCU
1282 * read-side critical sections have completed. call_rcu_tasks_rude()
1283 * assumes that the read-side critical sections end at context switch,
1284 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1285 * usermode execution is schedulable). As such, there are no read-side
1286 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1287 * this primitive is intended to determine that all tasks have passed
1288 * through a safe state, not so much for data-structure synchronization.
1289 *
1290 * See the description of call_rcu() for more detailed information on
1291 * memory ordering guarantees.
1292 */
1293void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1294{
1295	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1296}
1297EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1298
1299/**
1300 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1301 *
1302 * Control will return to the caller some time after a rude rcu-tasks
1303 * grace period has elapsed, in other words after all currently
1304 * executing rcu-tasks read-side critical sections have completed.  These
1305 * read-side critical sections are delimited by calls to schedule(),
1306 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1307 * context), and (in theory, anyway) cond_resched().
1308 *
1309 * This is a very specialized primitive, intended only for a few uses in
1310 * tracing and other situations requiring manipulation of function preambles
1311 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1312 * (yet) intended for heavy use from multiple CPUs.
1313 *
1314 * See the description of synchronize_rcu() for more detailed information
1315 * on memory ordering guarantees.
1316 */
1317void synchronize_rcu_tasks_rude(void)
1318{
1319	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1320}
1321EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1322
1323/**
1324 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1325 *
1326 * Although the current implementation is guaranteed to wait, it is not
1327 * obligated to do so if, for example, there are no pending callbacks.
1328 */
1329void rcu_barrier_tasks_rude(void)
1330{
1331	rcu_barrier_tasks_generic(&rcu_tasks_rude);
1332}
1333EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1334
1335int rcu_tasks_rude_lazy_ms = -1;
1336module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1337
1338static int __init rcu_spawn_tasks_rude_kthread(void)
1339{
1340	rcu_tasks_rude.gp_sleep = HZ / 10;
1341	if (rcu_tasks_rude_lazy_ms >= 0)
1342		rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1343	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1344	return 0;
1345}
1346
1347#if !defined(CONFIG_TINY_RCU)
1348void show_rcu_tasks_rude_gp_kthread(void)
1349{
1350	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1351}
1352EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1353#endif // !defined(CONFIG_TINY_RCU)
1354
1355struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1356{
1357	return rcu_tasks_rude.kthread_ptr;
1358}
1359EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1360
1361#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1362
1363////////////////////////////////////////////////////////////////////////
1364//
1365// Tracing variant of Tasks RCU.  This variant is designed to be used
1366// to protect tracing hooks, including those of BPF.  This variant
1367// therefore:
1368//
1369// 1.	Has explicit read-side markers to allow finite grace periods in the
1370//	face of in-kernel loops for PREEMPT=n builds (sketch after this block).
1371//
1372// 2.	Protects code in the idle loop, exception entry/exit, and
1373//	CPU-hotplug code paths, similar to the capabilities of SRCU.
1374//
1375// 3.	Avoids expensive read-side instructions, having overhead similar
1376//	to that of Preemptible RCU.
1377//
1378// There are of course downsides.  For example, the grace-period code
1379// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1380// in nohz_full userspace.  If needed, these downsides can be at least
1381// partially remedied.
1382//
1383// Perhaps most important, this variant of RCU does not affect the vanilla
1384// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1385// readers can operate from idle, offline, and exception entry/exit in no
1386// way allows rcu_preempt and rcu_sched readers to also do so.
1387//
1388// The implementation uses rcu_tasks_wait_gp(), which relies on function
1389// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1390// function sets these function pointers up so that rcu_tasks_wait_gp()
1391// invokes these functions in this order:
1392//
1393// rcu_tasks_trace_pregp_step():
1394//	Disables CPU hotplug, adds all currently executing tasks to the
1395//	holdout list, then checks the state of all tasks that blocked
1396//	or were preempted within their current RCU Tasks Trace read-side
1397//	critical section, adding them to the holdout list if appropriate.
1398//	Finally, this function re-enables CPU hotplug.
1399// The ->pertask_func() pointer is NULL, so there is no per-task processing.
1400// rcu_tasks_trace_postscan():
1401//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
1402//	to finish exiting.
1403// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1404//	Scans the holdout list, attempting to identify a quiescent state
1405//	for each task on the list.  If there is a quiescent state, the
1406//	corresponding task is removed from the holdout list.  Once this
1407//	list is empty, the grace period has completed.
1408// rcu_tasks_trace_postgp():
1409//	Provides the needed full memory barrier and does debug checks.
1410//
1411// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1412//
1413// Pre-grace-period update-side code is ordered before the grace period
1414// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
1415// read-side code is ordered before the grace period by atomic operations
1416// on .b.need_qs flag of each task involved in this process, or by scheduler
1417// context-switch ordering (for locked-down non-running readers).
1418
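/*
 * Illustrative read-side sketch for item 1 in the comment block above,
 * with a hypothetical my_hook pointer published by the updater:
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference(my_hook);
 *	if (p)
 *		p->func(p->arg);
 *	rcu_read_unlock_trace();
 */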
1419// The lockdep state must be outside of #ifdef to be useful.
1420#ifdef CONFIG_DEBUG_LOCK_ALLOC
1421static struct lock_class_key rcu_lock_trace_key;
1422struct lockdep_map rcu_trace_lock_map =
1423	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1424EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1425#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1426
1427#ifdef CONFIG_TASKS_TRACE_RCU
1428
1429// Record outstanding IPIs to each CPU.  No point in sending two...
1430static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1431
1432// The number of detections of task quiescent state relying on
1433// heavyweight readers executing explicit memory barriers.
1434static unsigned long n_heavy_reader_attempts;
1435static unsigned long n_heavy_reader_updates;
1436static unsigned long n_heavy_reader_ofl_updates;
1437static unsigned long n_trc_holdouts;
1438
1439void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1440DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1441		 "RCU Tasks Trace");
1442
1443/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1444static u8 rcu_ld_need_qs(struct task_struct *t)
1445{
1446	smp_mb(); // Enforce full grace-period ordering.
1447	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1448}
1449
1450/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1451static void rcu_st_need_qs(struct task_struct *t, u8 v)
1452{
1453	smp_store_release(&t->trc_reader_special.b.need_qs, v);
1454	smp_mb(); // Enforce full grace-period ordering.
1455}
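/*
 * The load-acquire in rcu_ld_need_qs() pairs with the store-release in
 * rcu_st_need_qs().  A generic message-passing sketch of such pairing,
 * with hypothetical shared variables data and flag:
 *
 *	// CPU 0				// CPU 1
 *	WRITE_ONCE(data, 42);			if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);			r = READ_ONCE(data); // r == 42.
 */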
1456
1457/*
1458 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1459 * the four-byte operand-size restriction of some platforms.
1460 * Returns the old value, which is often ignored.
1461 */
1462u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1463{
1464	union rcu_special ret;
1465	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1466	union rcu_special trs_new = trs_old;
1467
1468	if (trs_old.b.need_qs != old)
1469		return trs_old.b.need_qs;
1470	trs_new.b.need_qs = new;
1471	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1472	return ret.b.need_qs;
1473}
1474EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
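/*
 * Illustrative sketch of the technique used above: where cmpxchg()
 * cannot operate on a single byte, atomically update a sub-word field
 * by snapshotting the containing word, editing the snapshot, and doing
 * cmpxchg() on the whole word.  With a hypothetical union:
 *
 *	union word {
 *		struct { u8 field; u8 pad[3]; } b;
 *		u32 s;
 *	};
 *
 *	union word old, new, ret;
 *
 *	old.s = READ_ONCE(wp->s);	// wp points to the shared union.
 *	if (old.b.field != expected)
 *		return old.b.field;	// Raced; report current value.
 *	new = old;
 *	new.b.field = desired;
 *	ret.s = cmpxchg(&wp->s, old.s, new.s);
 *	return ret.b.field;		// Equals expected on success.
 */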
1475
1476/*
1477 * If we are the last reader, signal the grace-period kthread.
1478 * Also remove from the per-CPU list of blocked tasks.
1479 */
1480void rcu_read_unlock_trace_special(struct task_struct *t)
1481{
1482	unsigned long flags;
1483	struct rcu_tasks_percpu *rtpcp;
1484	union rcu_special trs;
1485
1486	// Open-coded full-word version of rcu_ld_need_qs().
1487	smp_mb(); // Enforce full grace-period ordering.
1488	trs = smp_load_acquire(&t->trc_reader_special);
1489
1490	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1491		smp_mb(); // Pairs with update-side barriers.
1492	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1493	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1494		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1495						       TRC_NEED_QS_CHECKED);
1496
1497		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1498	}
1499	if (trs.b.blocked) {
1500		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1501		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1502		list_del_init(&t->trc_blkd_node);
1503		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1504		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1505	}
1506	WRITE_ONCE(t->trc_reader_nesting, 0);
1507}
1508EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1509
1510/* Add a newly blocked reader task to its CPU's list. */
1511void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1512{
1513	unsigned long flags;
1514	struct rcu_tasks_percpu *rtpcp;
1515
1516	local_irq_save(flags);
1517	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1518	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1519	t->trc_blkd_cpu = smp_processor_id();
1520	if (!rtpcp->rtp_blkd_tasks.next)
1521		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1522	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1523	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1524	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1525}
1526EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1527
1528/* Add a task to the holdout list, if it is not already on the list. */
1529static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1530{
1531	if (list_empty(&t->trc_holdout_list)) {
1532		get_task_struct(t);
1533		list_add(&t->trc_holdout_list, bhp);
1534		n_trc_holdouts++;
1535	}
1536}
1537
1538/* Remove a task from the holdout list, if it is in fact present. */
1539static void trc_del_holdout(struct task_struct *t)
1540{
1541	if (!list_empty(&t->trc_holdout_list)) {
1542		list_del_init(&t->trc_holdout_list);
1543		put_task_struct(t);
1544		n_trc_holdouts--;
1545	}
1546}
1547
1548/* IPI handler to check task state. */
1549static void trc_read_check_handler(void *t_in)
1550{
1551	int nesting;
1552	struct task_struct *t = current;
1553	struct task_struct *texp = t_in;
1554
1555	// If the task is no longer running on this CPU, leave.
1556	if (unlikely(texp != t))
1557		goto reset_ipi; // Already on holdout list, so will check later.
1558
1559	// If the task is not in a read-side critical section, and
1560	// if this is the last reader, awaken the grace-period kthread.
1561	nesting = READ_ONCE(t->trc_reader_nesting);
1562	if (likely(!nesting)) {
1563		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1564		goto reset_ipi;
1565	}
1566	// If we are racing with an rcu_read_unlock_trace(), try again later.
1567	if (unlikely(nesting < 0))
1568		goto reset_ipi;
1569
1570	// Get here if the task is in a read-side critical section.
1571	// Set its state so that it will update state for the grace-period
1572	// kthread upon exit from that critical section.
1573	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1574
1575reset_ipi:
1576	// Allow future IPIs to be sent on CPU and for task.
1577	// Also order this IPI handler against any later manipulations of
1578	// the intended task.
1579	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1580	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1581}
1582
1583/* Callback function for scheduler to check locked-down task.  */
1584static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1585{
1586	struct list_head *bhp = bhp_in;
1587	int cpu = task_cpu(t);
1588	int nesting;
1589	bool ofl = cpu_is_offline(cpu);
1590
1591	if (task_curr(t) && !ofl) {
1592		// If no chance of heavyweight readers, do it the hard way.
1593		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1594			return -EINVAL;
1595
1596		// If heavyweight readers are enabled on the remote task,
1597		// we can inspect its state even though it is currently running.
1598		// However, we cannot safely change its state.
1599		n_heavy_reader_attempts++;
1600		// Check for "running" idle tasks on offline CPUs.
1601		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1602			return -EINVAL; // No quiescent state, do it the hard way.
1603		n_heavy_reader_updates++;
1604		nesting = 0;
1605	} else {
1606		// The task is not running, so C-language access is safe.
1607		nesting = t->trc_reader_nesting;
1608		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1609		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1610			n_heavy_reader_ofl_updates++;
1611	}
1612
1613	// If not exiting a read-side critical section, mark as checked
1614	// so that the grace-period kthread will remove it from the
1615	// holdout list.
1616	if (!nesting) {
1617		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1618		return 0;  // In QS, so done.
1619	}
1620	if (nesting < 0)
1621		return -EINVAL; // Reader transitioning, try again later.
1622
1623	// The task is in a read-side critical section, so set up its
1624	// state so that it will update state upon exit from that critical
1625	// section.
1626	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1627		trc_add_holdout(t, bhp);
1628	return 0;
1629}
1630
1631/* Attempt to extract the state for the specified task. */
1632static void trc_wait_for_one_reader(struct task_struct *t,
1633				    struct list_head *bhp)
1634{
1635	int cpu;
1636
1637	// If a previous IPI is still in flight, let it complete.
1638	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1639		return;
1640
1641	// The current task had better be in a quiescent state.
1642	if (t == current) {
1643		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1644		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1645		return;
1646	}
1647
1648	// Attempt to nail down the task for inspection.
1649	get_task_struct(t);
1650	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1651		put_task_struct(t);
1652		return;
1653	}
1654	put_task_struct(t);
1655
1656	// If this task is not yet on the holdout list, then we are in
1657	// an RCU read-side critical section.  Otherwise, the invocation of
1658	// trc_add_holdout() that added it to the list did the necessary
1659	// get_task_struct().  Either way, the task cannot be freed out
1660	// from under this code.
1661
1662	// If currently running, send an IPI; either way, add to the list.
1663	trc_add_holdout(t, bhp);
1664	if (task_curr(t) &&
1665	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1666		// The task is currently running, so try IPIing it.
1667		cpu = task_cpu(t);
1668
1669		// If there is already an IPI outstanding, let it happen.
1670		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1671			return;
1672
1673		per_cpu(trc_ipi_to_cpu, cpu) = true;
1674		t->trc_ipi_to_cpu = cpu;
1675		rcu_tasks_trace.n_ipis++;
1676		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1677			// Just in case there is some other reason for
1678			// failure than the target CPU being offline.
1679			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1680				  __func__, cpu);
1681			rcu_tasks_trace.n_ipis_fails++;
1682			per_cpu(trc_ipi_to_cpu, cpu) = false;
1683			t->trc_ipi_to_cpu = -1;
1684		}
1685	}
1686}
1687
1688/*
1689 * Initialize for first-round processing for the specified task.
1690 * Return false if task is NULL or already taken care of, true otherwise.
1691 */
1692static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1693{
1694	// During early boot, when only the boot CPU is online, there
1695	// is no idle task for the other CPUs.  Also, the grace-period
1696	// kthread is always in a quiescent state.  Finally, just return
1697	// if this task is already on the list.
1698	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1699		return false;
1700
1701	rcu_st_need_qs(t, 0);
1702	t->trc_ipi_to_cpu = -1;
1703	return true;
1704}
1705
1706/* Do first-round processing for the specified task. */
1707static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1708{
1709	if (rcu_tasks_trace_pertask_prep(t, true))
1710		trc_wait_for_one_reader(t, hop);
1711}
1712
1713/* Initialize for a new RCU-tasks-trace grace period. */
1714static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1715{
1716	LIST_HEAD(blkd_tasks);
1717	int cpu;
1718	unsigned long flags;
1719	struct rcu_tasks_percpu *rtpcp;
1720	struct task_struct *t;
1721
1722	// There shouldn't be any old IPIs, but...
1723	for_each_possible_cpu(cpu)
1724		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1725
1726	// Disable CPU hotplug across the CPU scan for the benefit of
1727	// any IPIs that might be needed.  This also waits for all readers
1728	// in CPU-hotplug code paths.
1729	cpus_read_lock();
1730
1731	// These rcu_tasks_trace_pertask_prep() calls are serialized to
1732	// allow safe access to the hop list.
1733	for_each_online_cpu(cpu) {
1734		rcu_read_lock();
1735		t = cpu_curr_snapshot(cpu);
1736		if (rcu_tasks_trace_pertask_prep(t, true))
1737			trc_add_holdout(t, hop);
1738		rcu_read_unlock();
1739		cond_resched_tasks_rcu_qs();
1740	}
1741
1742	// Only after all running tasks have been accounted for is it
1743	// safe to take care of the tasks that have blocked within their
1744	// current RCU tasks trace read-side critical section.
1745	for_each_possible_cpu(cpu) {
1746		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1747		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1748		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1749		while (!list_empty(&blkd_tasks)) {
1750			rcu_read_lock();
1751			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1752			list_del_init(&t->trc_blkd_node);
1753			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1754			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1755			rcu_tasks_trace_pertask(t, hop);
1756			rcu_read_unlock();
1757			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1758		}
1759		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1760		cond_resched_tasks_rcu_qs();
1761	}
1762
1763	// Re-enable CPU hotplug now that the holdout list is populated.
1764	cpus_read_unlock();
1765}
1766
1767/*
1768 * Do intermediate processing between task and holdout scans.
1769 */
1770static void rcu_tasks_trace_postscan(struct list_head *hop)
1771{
1772	// Wait for late-stage exiting tasks to finish exiting.
1773	// These might have passed the call to exit_tasks_rcu_finish().
1774
1775	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1776	synchronize_rcu();
1777	// Any tasks that exit after this point will set
1778	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1779}
1780
1781/* Communicate task state back to the RCU tasks trace stall warning request. */
1782struct trc_stall_chk_rdr {
1783	int nesting;
1784	int ipi_to_cpu;
1785	u8 needqs;
1786};
1787
1788static int trc_check_slow_task(struct task_struct *t, void *arg)
1789{
1790	struct trc_stall_chk_rdr *trc_rdrp = arg;
1791
1792	if (task_curr(t) && cpu_online(task_cpu(t)))
1793		return false; // It is running, so decline to inspect it.
1794	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1795	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1796	trc_rdrp->needqs = rcu_ld_need_qs(t);
1797	return true;
1798}
1799
1800/* Show the state of a task stalling the current RCU tasks trace GP. */
1801static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1802{
1803	int cpu;
1804	struct trc_stall_chk_rdr trc_rdr;
1805	bool is_idle_tsk = is_idle_task(t);
1806
1807	if (*firstreport) {
1808		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1809		*firstreport = false;
1810	}
1811	cpu = task_cpu(t);
1812	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1813		pr_alert("P%d: %c%c\n",
1814			 t->pid,
1815			 ".I"[t->trc_ipi_to_cpu >= 0],
1816			 ".i"[is_idle_tsk]);
1817	else
1818		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1819			 t->pid,
1820			 ".I"[trc_rdr.ipi_to_cpu >= 0],
1821			 ".i"[is_idle_tsk],
1822			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1823			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1824			 trc_rdr.nesting,
1825			 " !CN"[trc_rdr.needqs & 0x3],
1826			 " ?"[trc_rdr.needqs > 0x3],
1827			 cpu, cpu_online(cpu) ? "" : "(offline)");
1828	sched_show_task(t);
1829}
1830
1831/* List stalled IPIs for RCU tasks trace. */
1832static void show_stalled_ipi_trace(void)
1833{
1834	int cpu;
1835
1836	for_each_possible_cpu(cpu)
1837		if (per_cpu(trc_ipi_to_cpu, cpu))
1838			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1839}
1840
1841/* Do one scan of the holdout list. */
1842static void check_all_holdout_tasks_trace(struct list_head *hop,
1843					  bool needreport, bool *firstreport)
1844{
1845	struct task_struct *g, *t;
1846
1847	// Disable CPU hotplug across the holdout list scan for IPIs.
1848	cpus_read_lock();
1849
1850	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1851		// If safe and needed, try to check the current task.
1852		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1853		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1854			trc_wait_for_one_reader(t, hop);
1855
1856		// If check succeeded, remove this task from the list.
1857		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1858		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1859			trc_del_holdout(t);
1860		else if (needreport)
1861			show_stalled_task_trace(t, firstreport);
1862		cond_resched_tasks_rcu_qs();
1863	}
1864
1865	// Re-enable CPU hotplug now that the holdout list scan has completed.
1866	cpus_read_unlock();
1867
1868	if (needreport) {
1869		if (*firstreport)
1870			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1871		show_stalled_ipi_trace();
1872	}
1873}
1874
1875static void rcu_tasks_trace_empty_fn(void *unused)
1876{
1877}
1878
1879/* Wait for grace period to complete and provide ordering. */
1880static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1881{
1882	int cpu;
1883
1884	// Wait for any lingering IPI handlers to complete.  Note that
1885	// if a CPU has gone offline or transitioned to userspace in the
1886	// meantime, all IPI handlers should have been drained beforehand.
1887	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1888	// changes, there will need to be a recheck and/or timed wait.
1889	for_each_online_cpu(cpu)
1890		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1891			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1892
1893	smp_mb(); // Caller's code must be ordered after wakeup.
1894		  // Pairs with pretty much every ordering primitive.
1895}
1896
1897/* Report any needed quiescent state for this exiting task. */
1898static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1899{
1900	union rcu_special trs = READ_ONCE(t->trc_reader_special);
1901
1902	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1903	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1904	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1905		rcu_read_unlock_trace_special(t);
1906	else
1907		WRITE_ONCE(t->trc_reader_nesting, 0);
1908}
1909
1910/**
1911 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
1912 * @rhp: structure to be used for queueing the RCU updates.
1913 * @func: actual callback function to be invoked after the grace period
1914 *
1915 * The callback function will be invoked some time after a trace rcu-tasks
1916 * grace period elapses, in other words after all currently executing
1917 * trace rcu-tasks read-side critical sections have completed. These
1918 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1919 * and rcu_read_unlock_trace().
1920 *
1921 * See the description of call_rcu() for more detailed information on
1922 * memory ordering guarantees.
1923 */
1924void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1925{
1926	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1927}
1928EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
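/*
 * Illustrative sketch (hypothetical names): replacing and freeing a
 * BPF-style program whose invocations run under rcu_read_lock_trace():
 *
 *	// Reader, e.g. a tracing hook:
 *	rcu_read_lock_trace();
 *	prog = rcu_dereference(hook->prog);
 *	if (prog)
 *		run_prog(prog);
 *	rcu_read_unlock_trace();
 *
 *	// Updater, holding hook->mutex:
 *	old = rcu_replace_pointer(hook->prog, newprog,
 *				  lockdep_is_held(&hook->mutex));
 *	call_rcu_tasks_trace(&old->rh, prog_free_cb);	// Hypothetical callback.
 */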
1929
1930/**
1931 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1932 *
1933 * Control will return to the caller some time after a trace rcu-tasks
1934 * grace period has elapsed, in other words after all currently executing
1935 * trace rcu-tasks read-side critical sections have completed. These read-side
1936 * critical sections are delimited by calls to rcu_read_lock_trace()
1937 * and rcu_read_unlock_trace().
1938 *
1939 * This is a very specialized primitive, intended only for a few uses in
1940 * tracing and other situations requiring manipulation of function preambles
1941 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1942 * (yet) intended for heavy use from multiple CPUs.
1943 *
1944 * See the description of synchronize_rcu() for more detailed information
1945 * on memory ordering guarantees.
1946 */
1947void synchronize_rcu_tasks_trace(void)
1948{
1949	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1950	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1951}
1952EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1953
1954/**
1955 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1956 *
1957 * Although the current implementation is guaranteed to wait, it is not
1958 * obligated to do so if, for example, there are no pending callbacks.
1959 */
1960void rcu_barrier_tasks_trace(void)
1961{
1962	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1963}
1964EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1965
1966int rcu_tasks_trace_lazy_ms = -1;
1967module_param(rcu_tasks_trace_lazy_ms, int, 0444);
1968
1969static int __init rcu_spawn_tasks_trace_kthread(void)
1970{
1971	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1972		rcu_tasks_trace.gp_sleep = HZ / 10;
1973		rcu_tasks_trace.init_fract = HZ / 10;
1974	} else {
1975		rcu_tasks_trace.gp_sleep = HZ / 200;
1976		if (rcu_tasks_trace.gp_sleep <= 0)
1977			rcu_tasks_trace.gp_sleep = 1;
1978		rcu_tasks_trace.init_fract = HZ / 200;
1979		if (rcu_tasks_trace.init_fract <= 0)
1980			rcu_tasks_trace.init_fract = 1;
1981	}
1982	if (rcu_tasks_trace_lazy_ms >= 0)
1983		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1984	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1985	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1986	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1987	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1988	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1989	return 0;
1990}
1991
1992#if !defined(CONFIG_TINY_RCU)
1993void show_rcu_tasks_trace_gp_kthread(void)
1994{
1995	char buf[64];
1996
1997	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
1998		data_race(n_trc_holdouts),
1999		data_race(n_heavy_reader_ofl_updates),
2000		data_race(n_heavy_reader_updates),
2001		data_race(n_heavy_reader_attempts));
2002	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2003}
2004EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
2005#endif // !defined(CONFIG_TINY_RCU)
2006
2007struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
2008{
2009	return rcu_tasks_trace.kthread_ptr;
2010}
2011EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
2012
2013#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
2014static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
2015#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
2016
2017#ifndef CONFIG_TINY_RCU
2018void show_rcu_tasks_gp_kthreads(void)
2019{
2020	show_rcu_tasks_classic_gp_kthread();
2021	show_rcu_tasks_rude_gp_kthread();
2022	show_rcu_tasks_trace_gp_kthread();
2023}
2024#endif /* #ifndef CONFIG_TINY_RCU */
2025
2026#ifdef CONFIG_PROVE_RCU
2027struct rcu_tasks_test_desc {
2028	struct rcu_head rh;
2029	const char *name;
2030	bool notrun;
2031	unsigned long runstart;
2032};
2033
2034static struct rcu_tasks_test_desc tests[] = {
2035	{
2036		.name = "call_rcu_tasks()",
2037		/* If not defined, the test is skipped. */
2038		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
2039	},
2040	{
2041		.name = "call_rcu_tasks_rude()",
2042		/* If not defined, the test is skipped. */
2043		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
2044	},
2045	{
2046		.name = "call_rcu_tasks_trace()",
2047		/* If not defined, the test is skipped. */
2048		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
2049	}
2050};
2051
2052static void test_rcu_tasks_callback(struct rcu_head *rhp)
2053{
2054	struct rcu_tasks_test_desc *rttd =
2055		container_of(rhp, struct rcu_tasks_test_desc, rh);
2056
2057	pr_info("Callback from %s invoked.\n", rttd->name);
2058
2059	rttd->notrun = false;
2060}
2061
2062static void rcu_tasks_initiate_self_tests(void)
2063{
2064#ifdef CONFIG_TASKS_RCU
2065	pr_info("Running RCU Tasks wait API self tests\n");
2066	tests[0].runstart = jiffies;
2067	synchronize_rcu_tasks();
2068	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2069#endif
2070
2071#ifdef CONFIG_TASKS_RUDE_RCU
2072	pr_info("Running RCU Tasks Rude wait API self tests\n");
2073	tests[1].runstart = jiffies;
2074	synchronize_rcu_tasks_rude();
2075	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2076#endif
2077
2078#ifdef CONFIG_TASKS_TRACE_RCU
2079	pr_info("Running RCU Tasks Trace wait API self tests\n");
2080	tests[2].runstart = jiffies;
2081	synchronize_rcu_tasks_trace();
2082	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2083#endif
2084}
2085
2086/*
2087 * Return:  0 - test passed
2088 *	    1 - test failed, but has not timed out yet
2089 *	   -1 - test failed and timed out
2090 */
2091static int rcu_tasks_verify_self_tests(void)
2092{
2093	int ret = 0;
2094	int i;
2095	unsigned long bst = rcu_task_stall_timeout;
2096
2097	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2098		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2099	for (i = 0; i < ARRAY_SIZE(tests); i++) {
2100		while (tests[i].notrun) {		// still hanging.
2101			if (time_after(jiffies, tests[i].runstart + bst)) {
2102				pr_err("%s has failed boot-time tests.\n", tests[i].name);
2103				ret = -1;
2104				break;
2105			}
2106			ret = 1;
2107			break;
2108		}
2109	}
2110	WARN_ON(ret < 0);
2111
2112	return ret;
2113}
2114
2115/*
2116 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2117 * test passes or has timed out.
2118 */
2119static struct delayed_work rcu_tasks_verify_work;
2120static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2121{
2122	int ret = rcu_tasks_verify_self_tests();
2123
2124	if (ret <= 0)
2125		return;
2126
2127	/* Test failed but has not timed out yet; reschedule another check. */
2128	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2129}
2130
2131static int rcu_tasks_verify_schedule_work(void)
2132{
2133	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2134	rcu_tasks_verify_work_fn(NULL);
2135	return 0;
2136}
2137late_initcall(rcu_tasks_verify_schedule_work);
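/*
 * A minimal sketch of the polling pattern used above: a delayed work
 * item re-queues itself until a condition is met (condition_met() is
 * hypothetical):
 *
 *	static struct delayed_work poll_work;
 *
 *	static void poll_fn(struct work_struct *unused)
 *	{
 *		if (condition_met())
 *			return;				// Done, stop polling.
 *		schedule_delayed_work(&poll_work, HZ);	// Retry in one second.
 *	}
 *
 *	INIT_DELAYED_WORK(&poll_work, poll_fn);
 *	poll_fn(NULL);					// First check right away.
 */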
2138#else /* #ifdef CONFIG_PROVE_RCU */
2139static void rcu_tasks_initiate_self_tests(void) { }
2140#endif /* #else #ifdef CONFIG_PROVE_RCU */
2141
2142void __init tasks_cblist_init_generic(void)
2143{
2144	lockdep_assert_irqs_disabled();
2145	WARN_ON(num_online_cpus() > 1);
2146
2147#ifdef CONFIG_TASKS_RCU
2148	cblist_init_generic(&rcu_tasks);
2149#endif
2150
2151#ifdef CONFIG_TASKS_RUDE_RCU
2152	cblist_init_generic(&rcu_tasks_rude);
2153#endif
2154
2155#ifdef CONFIG_TASKS_TRACE_RCU
2156	cblist_init_generic(&rcu_tasks_trace);
2157#endif
2158}
2159
2160void __init rcu_init_tasks_generic(void)
2161{
2162#ifdef CONFIG_TASKS_RCU
2163	rcu_spawn_tasks_kthread();
2164#endif
2165
2166#ifdef CONFIG_TASKS_RUDE_RCU
2167	rcu_spawn_tasks_rude_kthread();
2168#endif
2169
2170#ifdef CONFIG_TASKS_TRACE_RCU
2171	rcu_spawn_tasks_trace_kthread();
2172#endif
2173
2174	// Run the self-tests.
2175	rcu_tasks_initiate_self_tests();
2176}
2177
2178#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2179static inline void rcu_tasks_bootup_oddness(void) {}
2180#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */