v4.17
 
  1/*
  2 * RCU expedited grace periods
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, you can access it online at
 16 * http://www.gnu.org/licenses/gpl-2.0.html.
 17 *
 18 * Copyright IBM Corporation, 2016
 19 *
 20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 21 */
 22
 23/*
 24 * Record the start of an expedited grace period.
 25 */
 26static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
 27{
 28	rcu_seq_start(&rsp->expedited_sequence);
 29}
 30
 31/*
  32 * Return the value that the expedited-grace-period counter will have
 33 * at the end of the current grace period.
 34 */
 35static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
 36{
 37	return rcu_seq_endval(&rsp->expedited_sequence);
 38}
 39
 40/*
 41 * Record the end of an expedited grace period.
 42 */
 43static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
 44{
 45	rcu_seq_end(&rsp->expedited_sequence);
 46	smp_mb(); /* Ensure that consecutive grace periods serialize. */
 47}
 48
 49/*
 50 * Take a snapshot of the expedited-grace-period counter.
 51 */
 52static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
 53{
 54	unsigned long s;
 55
 56	smp_mb(); /* Caller's modifications seen first by other CPUs. */
 57	s = rcu_seq_snap(&rsp->expedited_sequence);
 58	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 59	return s;
 60}
 61
 62/*
 63 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 64 * if a full expedited grace period has elapsed since that snapshot
 65 * was taken.
 66 */
 67static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 68{
 69	return rcu_seq_done(&rsp->expedited_sequence, s);
 70}
 71
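The snapshot/done pair above leans on the rcu_seq counter layout: the upper bits count grace periods and the low state bits are nonzero while one is in progress. Below is a minimal stand-alone sketch of that arithmetic; it assumes the two low state bits used by the rcu_seq helpers in kernel/rcu/rcu.h and is illustrative userspace code, not part of this file.

#include <assert.h>

#define SEQ_STATE_MASK 0x3UL	/* low bits are nonzero while a GP runs */

/* Value the counter must reach before a full GP has elapsed from "now". */
static unsigned long seq_snap(unsigned long seq)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Wrap-safe "has the counter reached the snapshot?" test. */
static int seq_done(unsigned long seq, unsigned long snap)
{
	return (long)(seq - snap) >= 0;
}

int main(void)
{
	unsigned long seq = 0;
	unsigned long s = seq_snap(seq);	/* s == 4: one full GP needed */

	seq += 1;				/* like rcu_exp_gp_seq_start() */
	assert(!seq_done(seq, s));		/* GP still in progress */
	seq = (seq | SEQ_STATE_MASK) + 1;	/* like rcu_exp_gp_seq_end() */
	assert(seq_done(seq, s));		/* a full GP has now elapsed */
	return 0;
}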
 72/*
 73 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 74 * recent CPU-online activity.  Note that these masks are not cleared
 75 * when CPUs go offline, so they reflect the union of all CPUs that have
 76 * ever been online.  This means that this function normally takes its
 77 * no-work-to-do fastpath.
 78 */
 79static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
 80{
 81	bool done;
 82	unsigned long flags;
 83	unsigned long mask;
 84	unsigned long oldmask;
 85	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
 86	struct rcu_node *rnp;
 87	struct rcu_node *rnp_up;
 88
 89	/* If no new CPUs onlined since last time, nothing to do. */
 90	if (likely(ncpus == rsp->ncpus_snap))
 91		return;
 92	rsp->ncpus_snap = ncpus;
 93
 94	/*
 95	 * Each pass through the following loop propagates newly onlined
 96	 * CPUs for the current rcu_node structure up the rcu_node tree.
 97	 */
 98	rcu_for_each_leaf_node(rsp, rnp) {
 99		raw_spin_lock_irqsave_rcu_node(rnp, flags);
100		if (rnp->expmaskinit == rnp->expmaskinitnext) {
101			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
102			continue;  /* No new CPUs, nothing to do. */
103		}
104
105		/* Update this node's mask, track old value for propagation. */
106		oldmask = rnp->expmaskinit;
107		rnp->expmaskinit = rnp->expmaskinitnext;
108		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
109
110		/* If was already nonzero, nothing to propagate. */
111		if (oldmask)
112			continue;
113
114		/* Propagate the new CPU up the tree. */
115		mask = rnp->grpmask;
116		rnp_up = rnp->parent;
117		done = false;
118		while (rnp_up) {
119			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
120			if (rnp_up->expmaskinit)
121				done = true;
122			rnp_up->expmaskinit |= mask;
123			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
124			if (done)
125				break;
126			mask = rnp_up->grpmask;
127			rnp_up = rnp_up->parent;
128		}
129	}
130}
131
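The propagation loop above depends on each rcu_node's ->grpmask being that node's single bit in its parent's masks, and on only the first propagation from a given leaf needing to walk upward. The following toy two-level model (hypothetical struct node; userspace code, not from this file) makes the bit bookkeeping concrete.

#include <stdio.h>

struct node {
	unsigned long expmaskinit;	/* children ever seen online */
	unsigned long grpmask;		/* this node's bit in its parent */
	struct node *parent;
};

/* Mirror of the propagation loop above, minus the locking. */
static void propagate_online(struct node *leaf, unsigned long newcpus)
{
	unsigned long oldmask = leaf->expmaskinit;
	unsigned long mask = leaf->grpmask;
	struct node *up;

	leaf->expmaskinit |= newcpus;
	if (oldmask)			/* leaf's bit was propagated earlier */
		return;
	for (up = leaf->parent; up; up = up->parent) {
		int done = up->expmaskinit != 0;

		up->expmaskinit |= mask;
		if (done)
			break;
		mask = up->grpmask;
	}
}

int main(void)
{
	struct node root  = { 0, 0,   NULL  };
	struct node leaf0 = { 0, 0x1, &root };
	struct node leaf1 = { 0, 0x2, &root };

	propagate_online(&leaf0, 0x0f);	/* leaf 0's CPUs 0-3 come online */
	propagate_online(&leaf1, 0x10);	/* leaf 1's CPU 4 comes online */
	printf("root.expmaskinit = %#lx\n", root.expmaskinit);	/* 0x3 */
	return 0;
}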
132/*
133 * Reset the ->expmask values in the rcu_node tree in preparation for
134 * a new expedited grace period.
135 */
136static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
137{
138	unsigned long flags;
139	struct rcu_node *rnp;
140
141	sync_exp_reset_tree_hotplug(rsp);
142	rcu_for_each_node_breadth_first(rsp, rnp) {
143		raw_spin_lock_irqsave_rcu_node(rnp, flags);
144		WARN_ON_ONCE(rnp->expmask);
145		rnp->expmask = rnp->expmaskinit;
146		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
147	}
148}
149
150/*
151 * Return non-zero if there is no RCU expedited grace period in progress
152 * for the specified rcu_node structure, in other words, if all CPUs and
153 * tasks covered by the specified rcu_node structure have done their bit
154 * for the current expedited grace period.  Works only for preemptible
155 * RCU -- other RCU implementations use other means.
156 *
157 * Caller must hold the rcu_state's exp_mutex.
158 */
159static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
160{
161	return rnp->exp_tasks == NULL &&
162	       READ_ONCE(rnp->expmask) == 0;
163}
164
165/*
166 * Report the exit from RCU read-side critical section for the last task
167 * that queued itself during or before the current expedited preemptible-RCU
168 * grace period.  This event is reported either to the rcu_node structure on
169 * which the task was queued or to one of that rcu_node structure's ancestors,
170 * recursively up the tree.  (Calm down, calm down, we do the recursion
171 * iteratively!)
172 *
173 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
174 * structure's ->lock.
175 */
176static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
177				 bool wake, unsigned long flags)
178	__releases(rnp->lock)
179{
180	unsigned long mask;
181
182	for (;;) {
183		if (!sync_rcu_preempt_exp_done(rnp)) {
184			if (!rnp->expmask)
185				rcu_initiate_boost(rnp, flags);
186			else
187				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
188			break;
189		}
190		if (rnp->parent == NULL) {
191			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
192			if (wake) {
193				smp_mb(); /* EGP done before wake_up(). */
194				swake_up(&rsp->expedited_wq);
195			}
196			break;
197		}
198		mask = rnp->grpmask;
199		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
200		rnp = rnp->parent;
201		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
202		WARN_ON_ONCE(!(rnp->expmask & mask));
203		rnp->expmask &= ~mask;
204	}
205}
206
207/*
208 * Report expedited quiescent state for specified node.  This is a
209 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
210 *
211 * Caller must hold the rcu_state's exp_mutex.
212 */
213static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
214					      struct rcu_node *rnp, bool wake)
215{
216	unsigned long flags;
217
218	raw_spin_lock_irqsave_rcu_node(rnp, flags);
219	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
220}
221
222/*
223 * Report expedited quiescent state for multiple CPUs, all covered by the
224 * specified leaf rcu_node structure.  Caller must hold the rcu_state's
225 * exp_mutex.
226 */
227static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
228				    unsigned long mask, bool wake)
229{
230	unsigned long flags;
231
232	raw_spin_lock_irqsave_rcu_node(rnp, flags);
233	if (!(rnp->expmask & mask)) {
234		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
235		return;
236	}
237	rnp->expmask &= ~mask;
238	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
239}
240
241/*
242 * Report expedited quiescent state for specified rcu_data (CPU).
243 */
244static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
245			       bool wake)
246{
247	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
248}
249
250/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
251static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
252			       unsigned long s)
253{
254	if (rcu_exp_gp_seq_done(rsp, s)) {
255		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
256		/* Ensure test happens before caller kfree(). */
257		smp_mb__before_atomic(); /* ^^^ */
258		atomic_long_inc(stat);
259		return true;
260	}
261	return false;
262}
263
264/*
265 * Funnel-lock acquisition for expedited grace periods.  Returns true
266 * if some other task completed an expedited grace period that this task
267 * can piggy-back on, and with no mutex held.  Otherwise, returns false
268 * with the mutex held, indicating that the caller must actually do the
269 * expedited grace period.
270 */
271static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
272{
273	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
274	struct rcu_node *rnp = rdp->mynode;
275	struct rcu_node *rnp_root = rcu_get_root(rsp);
276
277	/* Low-contention fastpath. */
278	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
279	    (rnp == rnp_root ||
280	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
281	    mutex_trylock(&rsp->exp_mutex))
282		goto fastpath;
283
284	/*
285	 * Each pass through the following loop works its way up
286	 * the rcu_node tree, returning if others have done the work or
287	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
288	 * from CPU to rcu_node structure can be inexact, as it is just
289	 * promoting locality and is not strictly needed for correctness.
290	 */
291	for (; rnp != NULL; rnp = rnp->parent) {
292		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
293			return true;
294
295		/* Work not done, either wait here or go up. */
296		spin_lock(&rnp->exp_lock);
297		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
298
299			/* Someone else doing GP, so wait for them. */
300			spin_unlock(&rnp->exp_lock);
301			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
302						  rnp->grplo, rnp->grphi,
303						  TPS("wait"));
304			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
305				   sync_exp_work_done(rsp,
306						      &rdp->exp_workdone2, s));
307			return true;
308		}
309		rnp->exp_seq_rq = s; /* Followers can wait on us. */
310		spin_unlock(&rnp->exp_lock);
311		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
312					  rnp->grphi, TPS("nxtlvl"));
313	}
314	mutex_lock(&rsp->exp_mutex);
315fastpath:
316	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
317		mutex_unlock(&rsp->exp_mutex);
318		return true;
319	}
320	rcu_exp_gp_seq_start(rsp);
321	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
322	return false;
323}
324
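A side note on the rnp->exp_wq[rcu_seq_ctr(s) & 0x3] indexing used above: stripping the low state bits and reducing the grace-period count modulo four spreads waiters for successive expedited grace periods across four distinct wait queues, so the wakeup for a just-completed grace period need not disturb waiters on later ones. A tiny stand-alone illustration (again assuming the two-bit state field of the rcu_seq counters; not kernel code):

#include <stdio.h>

static unsigned long seq_ctr(unsigned long s)
{
	return s >> 2;			/* strip the two low state bits */
}

int main(void)
{
	unsigned long s;

	/* Successive grace-period snapshots: 4, 8, 12, 16, 20, ... */
	for (s = 4; s <= 20; s += 4)
		printf("snapshot %2lu -> exp_wq[%lu]\n", s, seq_ctr(s) & 0x3);
	return 0;			/* prints slots 1, 2, 3, 0, 1 */
}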
325/* Invoked on each online non-idle CPU for expedited quiescent state. */
326static void sync_sched_exp_handler(void *data)
327{
328	struct rcu_data *rdp;
329	struct rcu_node *rnp;
330	struct rcu_state *rsp = data;
331
332	rdp = this_cpu_ptr(rsp->rda);
333	rnp = rdp->mynode;
334	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
335	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
336		return;
337	if (rcu_is_cpu_rrupt_from_idle()) {
338		rcu_report_exp_rdp(&rcu_sched_state,
339				   this_cpu_ptr(&rcu_sched_data), true);
340		return;
341	}
342	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
343	/* Store .exp before .rcu_urgent_qs. */
344	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
345	resched_cpu(smp_processor_id());
346}
347
348/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
349static void sync_sched_exp_online_cleanup(int cpu)
350{
351	struct rcu_data *rdp;
352	int ret;
353	struct rcu_node *rnp;
354	struct rcu_state *rsp = &rcu_sched_state;
355
356	rdp = per_cpu_ptr(rsp->rda, cpu);
357	rnp = rdp->mynode;
358	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
359		return;
360	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
361	WARN_ON_ONCE(ret);
362}
363
364/*
365 * Select the nodes that the upcoming expedited grace period needs
366 * to wait for.
367 */
368static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
369				     smp_call_func_t func)
370{
371	int cpu;
372	unsigned long flags;
373	unsigned long mask_ofl_test;
374	unsigned long mask_ofl_ipi;
375	int ret;
376	struct rcu_node *rnp;
377
378	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
379	sync_exp_reset_tree(rsp);
380	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
381	rcu_for_each_leaf_node(rsp, rnp) {
382		raw_spin_lock_irqsave_rcu_node(rnp, flags);
383
384		/* Each pass checks a CPU for identity, offline, and idle. */
385		mask_ofl_test = 0;
386		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
387			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
388			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
389			struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
390			int snap;
391
392			if (raw_smp_processor_id() == cpu ||
393			    !(rnp->qsmaskinitnext & mask)) {
394				mask_ofl_test |= mask;
395			} else {
396				snap = rcu_dynticks_snap(rdtp);
397				if (rcu_dynticks_in_eqs(snap))
398					mask_ofl_test |= mask;
399				else
400					rdp->exp_dynticks_snap = snap;
401			}
402		}
403		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
404
405		/*
406		 * Need to wait for any blocked tasks as well.  Note that
407		 * additional blocking tasks will also block the expedited
408		 * GP until such time as the ->expmask bits are cleared.
409		 */
410		if (rcu_preempt_has_tasks(rnp))
411			rnp->exp_tasks = rnp->blkd_tasks.next;
412		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
413
414		/* IPI the remaining CPUs for expedited quiescent state. */
415		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
416			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
417			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
418
419			if (!(mask_ofl_ipi & mask))
420				continue;
421retry_ipi:
422			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
423						      rdp->exp_dynticks_snap)) {
424				mask_ofl_test |= mask;
425				continue;
426			}
427			ret = smp_call_function_single(cpu, func, rsp, 0);
428			if (!ret) {
429				mask_ofl_ipi &= ~mask;
430				continue;
431			}
432			/* Failed, raced with CPU hotplug operation. */
433			raw_spin_lock_irqsave_rcu_node(rnp, flags);
434			if ((rnp->qsmaskinitnext & mask) &&
435			    (rnp->expmask & mask)) {
436				/* Online, so delay for a bit and try again. */
437				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
438				trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
439				schedule_timeout_uninterruptible(1);
440				goto retry_ipi;
441			}
442			/* CPU really is offline, so we can ignore it. */
443			if (!(rnp->expmask & mask))
444				mask_ofl_ipi &= ~mask;
445			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
446		}
447		/* Report quiescent states for those that went offline. */
448		mask_ofl_test |= mask_ofl_ipi;
449		if (mask_ofl_test)
450			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
451	}
452}
453
454static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
455{
456	int cpu;
457	unsigned long jiffies_stall;
458	unsigned long jiffies_start;
459	unsigned long mask;
460	int ndetected;
461	struct rcu_node *rnp;
462	struct rcu_node *rnp_root = rcu_get_root(rsp);
463	int ret;
464
465	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
466	jiffies_stall = rcu_jiffies_till_stall_check();
467	jiffies_start = jiffies;
468
469	for (;;) {
470		ret = swait_event_timeout(
471				rsp->expedited_wq,
472				sync_rcu_preempt_exp_done(rnp_root),
473				jiffies_stall);
474		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
475			return;
476		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
477		if (rcu_cpu_stall_suppress)
478			continue;
479		panic_on_rcu_stall();
480		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
481		       rsp->name);
482		ndetected = 0;
483		rcu_for_each_leaf_node(rsp, rnp) {
484			ndetected += rcu_print_task_exp_stall(rnp);
485			for_each_leaf_node_possible_cpu(rnp, cpu) {
486				struct rcu_data *rdp;
487
488				mask = leaf_node_cpu_bit(rnp, cpu);
489				if (!(rnp->expmask & mask))
490					continue;
491				ndetected++;
492				rdp = per_cpu_ptr(rsp->rda, cpu);
493				pr_cont(" %d-%c%c%c", cpu,
494					"O."[!!cpu_online(cpu)],
495					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
496					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
497			}
498		}
499		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
500			jiffies - jiffies_start, rsp->expedited_sequence,
501			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
502		if (ndetected) {
503			pr_err("blocking rcu_node structures:");
504			rcu_for_each_node_breadth_first(rsp, rnp) {
505				if (rnp == rnp_root)
506					continue; /* printed unconditionally */
507				if (sync_rcu_preempt_exp_done(rnp))
508					continue;
509				pr_cont(" l=%u:%d-%d:%#lx/%c",
510					rnp->level, rnp->grplo, rnp->grphi,
511					rnp->expmask,
512					".T"[!!rnp->exp_tasks]);
513			}
514			pr_cont("\n");
515		}
516		rcu_for_each_leaf_node(rsp, rnp) {
517			for_each_leaf_node_possible_cpu(rnp, cpu) {
518				mask = leaf_node_cpu_bit(rnp, cpu);
519				if (!(rnp->expmask & mask))
520					continue;
521				dump_cpu_task(cpu);
522			}
523		}
524		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
525	}
526}
527
528/*
529 * Wait for the current expedited grace period to complete, and then
530 * wake up everyone who piggybacked on the just-completed expedited
531 * grace period.  Also update all the ->exp_seq_rq counters as needed
532 * in order to avoid counter-wrap problems.
533 */
534static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
535{
536	struct rcu_node *rnp;
537
538	synchronize_sched_expedited_wait(rsp);
539	rcu_exp_gp_seq_end(rsp);
540	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
541
542	/*
543	 * Switch over to wakeup mode, allowing the next GP, but -only- the
544	 * next GP, to proceed.
545	 */
546	mutex_lock(&rsp->exp_wake_mutex);
547
548	rcu_for_each_node_breadth_first(rsp, rnp) {
549		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
550			spin_lock(&rnp->exp_lock);
551			/* Recheck, avoid hang in case someone just arrived. */
552			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
553				rnp->exp_seq_rq = s;
554			spin_unlock(&rnp->exp_lock);
555		}
556		smp_mb(); /* All above changes before wakeup. */
557		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
558	}
559	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
560	mutex_unlock(&rsp->exp_wake_mutex);
561}
562
563/* Let the workqueue handler know what it is supposed to do. */
564struct rcu_exp_work {
565	smp_call_func_t rew_func;
566	struct rcu_state *rew_rsp;
567	unsigned long rew_s;
568	struct work_struct rew_work;
569};
570
571/*
572 * Common code to drive an expedited grace period forward, used by
573 * workqueues and mid-boot-time tasks.
574 */
575static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
576				  smp_call_func_t func, unsigned long s)
577{
578	/* Initialize the rcu_node tree in preparation for the wait. */
579	sync_rcu_exp_select_cpus(rsp, func);
580
581	/* Wait and clean up, including waking everyone. */
582	rcu_exp_wait_wake(rsp, s);
583}
584
585/*
586 * Work-queue handler to drive an expedited grace period forward.
587 */
588static void wait_rcu_exp_gp(struct work_struct *wp)
589{
590	struct rcu_exp_work *rewp;
591
592	rewp = container_of(wp, struct rcu_exp_work, rew_work);
593	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
594}
595
596/*
597 * Given an rcu_state pointer and a smp_call_function() handler, kick
598 * off the specified flavor of expedited grace period.
599 */
600static void _synchronize_rcu_expedited(struct rcu_state *rsp,
601				       smp_call_func_t func)
602{
603	struct rcu_data *rdp;
604	struct rcu_exp_work rew;
605	struct rcu_node *rnp;
606	unsigned long s;
607
608	/* If expedited grace periods are prohibited, fall back to normal. */
609	if (rcu_gp_is_normal()) {
610		wait_rcu_gp(rsp->call);
611		return;
612	}
613
614	/* Take a snapshot of the sequence number.  */
615	s = rcu_exp_gp_seq_snap(rsp);
616	if (exp_funnel_lock(rsp, s))
617		return;  /* Someone else did our work for us. */
618
619	/* Ensure that load happens before action based on it. */
620	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
621		/* Direct call during scheduler init and early_initcalls(). */
622		rcu_exp_sel_wait_wake(rsp, func, s);
623	} else {
624		/* Marshall arguments & schedule the expedited grace period. */
625		rew.rew_func = func;
626		rew.rew_rsp = rsp;
627		rew.rew_s = s;
628		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
629		queue_work(rcu_gp_wq, &rew.rew_work);
630	}
631
632	/* Wait for expedited grace period to complete. */
633	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
634	rnp = rcu_get_root(rsp);
635	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
636		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
637	smp_mb(); /* Workqueue actions happen before return. */
638
639	/* Let the next expedited grace period start. */
640	mutex_unlock(&rsp->exp_mutex);
641}
642
643/**
644 * synchronize_sched_expedited - Brute-force RCU-sched grace period
645 *
646 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
647 * approach to force the grace period to end quickly.  This consumes
648 * significant time on all CPUs and is unfriendly to real-time workloads,
649 * so is thus not recommended for any sort of common-case code.  In fact,
650 * if you are using synchronize_sched_expedited() in a loop, please
651 * restructure your code to batch your updates, and then use a single
652 * synchronize_sched() instead.
653 *
654 * This implementation can be thought of as an application of sequence
655 * locking to expedited grace periods, but using the sequence counter to
656 * determine when someone else has already done the work instead of for
657 * retrying readers.
658 */
659void synchronize_sched_expedited(void)
660{
661	struct rcu_state *rsp = &rcu_sched_state;
662
663	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
664			 lock_is_held(&rcu_lock_map) ||
665			 lock_is_held(&rcu_sched_lock_map),
666			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");
667
668	/* If only one CPU, this is automatically a grace period. */
669	if (rcu_blocking_is_gp())
670		return;
671
672	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
673}
674EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
675
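To make the batching advice in the header comment concrete, here is a hedged caller-side sketch (hypothetical struct myobj and list heads, updater-side locking omitted; not code from this file): rather than paying one expedited grace period per removed element, unlink everything first and wait once.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct myobj {
	struct list_head node;		/* linkage in the RCU-protected list */
	struct list_head purge;		/* linkage in the local to-free list */
};

/* Costly: one expedited grace period per removed element. */
static void remove_all_slow(struct list_head *head)
{
	struct myobj *p, *n;

	list_for_each_entry_safe(p, n, head, node) {
		list_del_rcu(&p->node);
		synchronize_sched_expedited();
		kfree(p);
	}
}

/* Preferred: batch the removals, then wait for a single grace period. */
static void remove_all_batched(struct list_head *head)
{
	struct myobj *p, *n;
	LIST_HEAD(tofree);

	list_for_each_entry_safe(p, n, head, node) {
		list_del_rcu(&p->node);
		list_add(&p->purge, &tofree);
	}
	synchronize_sched();		/* one wait covers every removal */
	list_for_each_entry_safe(p, n, &tofree, purge)
		kfree(p);
}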
676#ifdef CONFIG_PREEMPT_RCU
677
678/*
679 * Remote handler for smp_call_function_single().  If there is an
680 * RCU read-side critical section in effect, request that the
681 * next rcu_read_unlock() record the quiescent state up the
682 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
683 * report the quiescent state.
684 */
685static void sync_rcu_exp_handler(void *info)
686{
687	struct rcu_data *rdp;
688	struct rcu_state *rsp = info;
689	struct task_struct *t = current;
690
691	/*
692	 * Within an RCU read-side critical section, request that the next
693	 * rcu_read_unlock() report.  Unless this RCU read-side critical
694	 * section has already blocked, in which case it is already set
695	 * up for the expedited grace period to wait on it.
696	 */
697	if (t->rcu_read_lock_nesting > 0 &&
698	    !t->rcu_read_unlock_special.b.blocked) {
699		t->rcu_read_unlock_special.b.exp_need_qs = true;
700		return;
701	}
702
703	/*
704	 * We are either exiting an RCU read-side critical section (negative
705	 * values of t->rcu_read_lock_nesting) or are not in one at all
706	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
707	 * read-side critical section that blocked before this expedited
708	 * grace period started.  Either way, we can immediately report
709	 * the quiescent state.
710	 */
711	rdp = this_cpu_ptr(rsp->rda);
712	rcu_report_exp_rdp(rsp, rdp, true);
713}
714
715/**
716 * synchronize_rcu_expedited - Brute-force RCU grace period
717 *
718 * Wait for an RCU-preempt grace period, but expedite it.  The basic
719 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
720 * checks whether the CPU is in an RCU-preempt critical section, and
721 * if so, it sets a flag that causes the outermost rcu_read_unlock()
722 * to report the quiescent state.  On the other hand, if the CPU is
723 * not in an RCU read-side critical section, the IPI handler reports
724 * the quiescent state immediately.
725 *
726 * Although this is a great improvement over previous expedited
727 * implementations, it is still unfriendly to real-time workloads, so is
728 * thus not recommended for any sort of common-case code.  In fact, if
729 * you are using synchronize_rcu_expedited() in a loop, please restructure
730 * your code to batch your updates, and then use a single synchronize_rcu()
731 * instead.
732 */
733void synchronize_rcu_expedited(void)
734{
735	struct rcu_state *rsp = rcu_state_p;
736
737	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
738			 lock_is_held(&rcu_lock_map) ||
739			 lock_is_held(&rcu_sched_lock_map),
740			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
741
742	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
743		return;
744	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
745}
746EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
747
748#else /* #ifdef CONFIG_PREEMPT_RCU */
749
750/*
751 * Wait for an rcu-preempt grace period, but make it happen quickly.
752 * But because preemptible RCU does not exist, map to rcu-sched.
753 */
754void synchronize_rcu_expedited(void)
755{
756	synchronize_sched_expedited();
757}
758EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
759
760#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0+ */
  2/*
  3 * RCU expedited grace periods
  4 *
  5 * Copyright IBM Corporation, 2016
  6 *
  7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
  8 */
  9
 10#include <linux/lockdep.h>
 11
 12static void rcu_exp_handler(void *unused);
 13static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 14
 15/*
 16 * Record the start of an expedited grace period.
 17 */
 18static void rcu_exp_gp_seq_start(void)
 19{
 20	rcu_seq_start(&rcu_state.expedited_sequence);
 21}
 22
 23/*
 24 * Return the value that the expedited-grace-period counter will have
 25 * at the end of the current grace period.
 26 */
 27static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
 28{
 29	return rcu_seq_endval(&rcu_state.expedited_sequence);
 30}
 31
 32/*
 33 * Record the end of an expedited grace period.
 34 */
 35static void rcu_exp_gp_seq_end(void)
 36{
 37	rcu_seq_end(&rcu_state.expedited_sequence);
 38	smp_mb(); /* Ensure that consecutive grace periods serialize. */
 39}
 40
 41/*
 42 * Take a snapshot of the expedited-grace-period counter, which is the
 43 * earliest value that will indicate that a full grace period has
 44 * elapsed since the current time.
 45 */
 46static unsigned long rcu_exp_gp_seq_snap(void)
 47{
 48	unsigned long s;
 49
 50	smp_mb(); /* Caller's modifications seen first by other CPUs. */
 51	s = rcu_seq_snap(&rcu_state.expedited_sequence);
 52	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
 53	return s;
 54}
 55
 56/*
 57 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 58 * if a full expedited grace period has elapsed since that snapshot
 59 * was taken.
 60 */
 61static bool rcu_exp_gp_seq_done(unsigned long s)
 62{
 63	return rcu_seq_done(&rcu_state.expedited_sequence, s);
 64}
 65
 66/*
 67 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 68 * recent CPU-online activity.  Note that these masks are not cleared
 69 * when CPUs go offline, so they reflect the union of all CPUs that have
 70 * ever been online.  This means that this function normally takes its
 71 * no-work-to-do fastpath.
 72 */
 73static void sync_exp_reset_tree_hotplug(void)
 74{
 75	bool done;
 76	unsigned long flags;
 77	unsigned long mask;
 78	unsigned long oldmask;
 79	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
 80	struct rcu_node *rnp;
 81	struct rcu_node *rnp_up;
 82
 83	/* If no new CPUs onlined since last time, nothing to do. */
 84	if (likely(ncpus == rcu_state.ncpus_snap))
 85		return;
 86	rcu_state.ncpus_snap = ncpus;
 87
 88	/*
 89	 * Each pass through the following loop propagates newly onlined
 90	 * CPUs for the current rcu_node structure up the rcu_node tree.
 91	 */
 92	rcu_for_each_leaf_node(rnp) {
 93		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 94		if (rnp->expmaskinit == rnp->expmaskinitnext) {
 95			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 96			continue;  /* No new CPUs, nothing to do. */
 97		}
 98
 99		/* Update this node's mask, track old value for propagation. */
100		oldmask = rnp->expmaskinit;
101		rnp->expmaskinit = rnp->expmaskinitnext;
102		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
103
104		/* If was already nonzero, nothing to propagate. */
105		if (oldmask)
106			continue;
107
108		/* Propagate the new CPU up the tree. */
109		mask = rnp->grpmask;
110		rnp_up = rnp->parent;
111		done = false;
112		while (rnp_up) {
113			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
114			if (rnp_up->expmaskinit)
115				done = true;
116			rnp_up->expmaskinit |= mask;
117			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
118			if (done)
119				break;
120			mask = rnp_up->grpmask;
121			rnp_up = rnp_up->parent;
122		}
123	}
124}
125
126/*
127 * Reset the ->expmask values in the rcu_node tree in preparation for
128 * a new expedited grace period.
129 */
130static void __maybe_unused sync_exp_reset_tree(void)
131{
132	unsigned long flags;
133	struct rcu_node *rnp;
134
135	sync_exp_reset_tree_hotplug();
136	rcu_for_each_node_breadth_first(rnp) {
137		raw_spin_lock_irqsave_rcu_node(rnp, flags);
138		WARN_ON_ONCE(rnp->expmask);
139		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
140		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
141	}
142}
143
144/*
145 * Return non-zero if there is no RCU expedited grace period in progress
146 * for the specified rcu_node structure, in other words, if all CPUs and
147 * tasks covered by the specified rcu_node structure have done their bit
148 * for the current expedited grace period.
149 */
150static bool sync_rcu_exp_done(struct rcu_node *rnp)
151{
152	raw_lockdep_assert_held_rcu_node(rnp);
153	return READ_ONCE(rnp->exp_tasks) == NULL &&
154	       READ_ONCE(rnp->expmask) == 0;
155}
156
157/*
158 * Like sync_rcu_exp_done(), but where the caller does not hold the
159 * rcu_node's ->lock.
160 */
161static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
162{
163	unsigned long flags;
164	bool ret;
165
166	raw_spin_lock_irqsave_rcu_node(rnp, flags);
167	ret = sync_rcu_exp_done(rnp);
168	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
169
170	return ret;
171}
172
173
174/*
175 * Report the exit from RCU read-side critical section for the last task
176 * that queued itself during or before the current expedited preemptible-RCU
177 * grace period.  This event is reported either to the rcu_node structure on
178 * which the task was queued or to one of that rcu_node structure's ancestors,
179 * recursively up the tree.  (Calm down, calm down, we do the recursion
180 * iteratively!)
181 */
182static void __rcu_report_exp_rnp(struct rcu_node *rnp,
183				 bool wake, unsigned long flags)
184	__releases(rnp->lock)
185{
186	unsigned long mask;
187
188	raw_lockdep_assert_held_rcu_node(rnp);
189	for (;;) {
190		if (!sync_rcu_exp_done(rnp)) {
191			if (!rnp->expmask)
192				rcu_initiate_boost(rnp, flags);
193			else
194				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
195			break;
196		}
197		if (rnp->parent == NULL) {
198			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199			if (wake) {
200				smp_mb(); /* EGP done before wake_up(). */
201				swake_up_one(&rcu_state.expedited_wq);
202			}
203			break;
204		}
205		mask = rnp->grpmask;
206		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
207		rnp = rnp->parent;
208		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
209		WARN_ON_ONCE(!(rnp->expmask & mask));
210		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
211	}
212}
213
214/*
215 * Report expedited quiescent state for specified node.  This is a
216 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
217 */
218static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
219{
220	unsigned long flags;
221
222	raw_spin_lock_irqsave_rcu_node(rnp, flags);
223	__rcu_report_exp_rnp(rnp, wake, flags);
224}
225
226/*
227 * Report expedited quiescent state for multiple CPUs, all covered by the
228 * specified leaf rcu_node structure.
229 */
230static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
231				    unsigned long mask, bool wake)
232{
233	int cpu;
234	unsigned long flags;
235	struct rcu_data *rdp;
236
237	raw_spin_lock_irqsave_rcu_node(rnp, flags);
238	if (!(rnp->expmask & mask)) {
239		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
240		return;
241	}
242	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
243	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
244		rdp = per_cpu_ptr(&rcu_data, cpu);
245		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
246			continue;
247		rdp->rcu_forced_tick_exp = false;
248		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
249	}
250	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
251}
252
253/*
254 * Report expedited quiescent state for specified rcu_data (CPU).
255 */
256static void rcu_report_exp_rdp(struct rcu_data *rdp)
257{
258	WRITE_ONCE(rdp->exp_deferred_qs, false);
259	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
260}
261
262/* Common code for work-done checking. */
263static bool sync_exp_work_done(unsigned long s)
264{
265	if (rcu_exp_gp_seq_done(s)) {
266		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
267		smp_mb(); /* Ensure test happens before caller kfree(). */
268		return true;
269	}
270	return false;
271}
272
273/*
274 * Funnel-lock acquisition for expedited grace periods.  Returns true
275 * if some other task completed an expedited grace period that this task
276 * can piggy-back on, and with no mutex held.  Otherwise, returns false
277 * with the mutex held, indicating that the caller must actually do the
278 * expedited grace period.
279 */
280static bool exp_funnel_lock(unsigned long s)
281{
282	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
283	struct rcu_node *rnp = rdp->mynode;
284	struct rcu_node *rnp_root = rcu_get_root();
285
286	/* Low-contention fastpath. */
287	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
288	    (rnp == rnp_root ||
289	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
290	    mutex_trylock(&rcu_state.exp_mutex))
291		goto fastpath;
292
293	/*
294	 * Each pass through the following loop works its way up
295	 * the rcu_node tree, returning if others have done the work or
296	 * otherwise falls through to acquire ->exp_mutex.  The mapping
297	 * from CPU to rcu_node structure can be inexact, as it is just
298	 * promoting locality and is not strictly needed for correctness.
299	 */
300	for (; rnp != NULL; rnp = rnp->parent) {
301		if (sync_exp_work_done(s))
302			return true;
303
304		/* Work not done, either wait here or go up. */
305		spin_lock(&rnp->exp_lock);
306		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
307
308			/* Someone else doing GP, so wait for them. */
309			spin_unlock(&rnp->exp_lock);
310			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
311						  rnp->grplo, rnp->grphi,
312						  TPS("wait"));
313			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
314				   sync_exp_work_done(s));
315			return true;
316		}
317		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
318		spin_unlock(&rnp->exp_lock);
319		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
320					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
321	}
322	mutex_lock(&rcu_state.exp_mutex);
323fastpath:
324	if (sync_exp_work_done(s)) {
325		mutex_unlock(&rcu_state.exp_mutex);
326		return true;
327	}
328	rcu_exp_gp_seq_start();
329	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
330	return false;
331}
332
333/*
334 * Select the CPUs within the specified rcu_node that the upcoming
335 * expedited grace period needs to wait for.
336 */
337static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
338{
339	int cpu;
340	unsigned long flags;
341	unsigned long mask_ofl_test;
342	unsigned long mask_ofl_ipi;
343	int ret;
344	struct rcu_exp_work *rewp =
345		container_of(wp, struct rcu_exp_work, rew_work);
346	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
347
348	raw_spin_lock_irqsave_rcu_node(rnp, flags);
349
350	/* Each pass checks a CPU for identity, offline, and idle. */
351	mask_ofl_test = 0;
352	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
353		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
354		unsigned long mask = rdp->grpmask;
355		int snap;
356
357		if (raw_smp_processor_id() == cpu ||
358		    !(rnp->qsmaskinitnext & mask)) {
359			mask_ofl_test |= mask;
360		} else {
361			snap = rcu_dynticks_snap(rdp);
362			if (rcu_dynticks_in_eqs(snap))
363				mask_ofl_test |= mask;
364			else
365				rdp->exp_dynticks_snap = snap;
366		}
367	}
368	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
369
370	/*
371	 * Need to wait for any blocked tasks as well.	Note that
372	 * additional blocking tasks will also block the expedited GP
373	 * until such time as the ->expmask bits are cleared.
374	 */
375	if (rcu_preempt_has_tasks(rnp))
376		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
377	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
378
379	/* IPI the remaining CPUs for expedited quiescent state. */
380	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
381		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
382		unsigned long mask = rdp->grpmask;
383
384retry_ipi:
385		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
386			mask_ofl_test |= mask;
387			continue;
388		}
389		if (get_cpu() == cpu) {
390			put_cpu();
391			continue;
392		}
393		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
394		put_cpu();
395		/* The CPU will report the QS in response to the IPI. */
396		if (!ret)
397			continue;
398
399		/* Failed, raced with CPU hotplug operation. */
400		raw_spin_lock_irqsave_rcu_node(rnp, flags);
401		if ((rnp->qsmaskinitnext & mask) &&
402		    (rnp->expmask & mask)) {
403			/* Online, so delay for a bit and try again. */
404			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
405			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
406			schedule_timeout_idle(1);
407			goto retry_ipi;
408		}
409		/* CPU really is offline, so we must report its QS. */
410		if (rnp->expmask & mask)
411			mask_ofl_test |= mask;
412		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
413	}
414	/* Report quiescent states for those that went offline. */
415	if (mask_ofl_test)
416		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
417}
418
419/*
420 * Select the nodes that the upcoming expedited grace period needs
421 * to wait for.
422 */
423static void sync_rcu_exp_select_cpus(void)
424{
425	int cpu;
426	struct rcu_node *rnp;
427
428	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
429	sync_exp_reset_tree();
430	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
431
432	/* Schedule work for each leaf rcu_node structure. */
433	rcu_for_each_leaf_node(rnp) {
434		rnp->exp_need_flush = false;
435		if (!READ_ONCE(rnp->expmask))
436			continue; /* Avoid early boot non-existent wq. */
437		if (!READ_ONCE(rcu_par_gp_wq) ||
438		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
439		    rcu_is_last_leaf_node(rnp)) {
440			/* No workqueues yet or last leaf, do direct call. */
441			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
442			continue;
443		}
444		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
445		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
446		/* If all offline, queue the work on an unbound CPU. */
447		if (unlikely(cpu > rnp->grphi - rnp->grplo))
448			cpu = WORK_CPU_UNBOUND;
449		else
450			cpu += rnp->grplo;
451		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
452		rnp->exp_need_flush = true;
453	}
454
455	/* Wait for workqueue jobs (if any) to complete. */
456	rcu_for_each_leaf_node(rnp)
457		if (rnp->exp_need_flush)
458			flush_work(&rnp->rew.rew_work);
459}
460
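The v5.14.15 code above fans the per-leaf CPU selection out to workqueue items and then flushes them, so the leaves are processed in parallel. Stripped of the RCU specifics, the queue-then-flush idiom looks like the sketch below (hypothetical struct item and do_one(); a sketch only, not code from this file).

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct item {
	struct work_struct work;
	/* ... per-item state ... */
};

static void do_one(struct work_struct *wp)
{
	struct item *it = container_of(wp, struct item, work);

	/* ... process *it, possibly in parallel with the other items ... */
	(void)it;
}

static void process_all(struct item *items, int n)
{
	int i;

	/* Fan out: one work item per element. */
	for (i = 0; i < n; i++) {
		INIT_WORK(&items[i].work, do_one);
		queue_work(system_wq, &items[i].work);
	}

	/* Fan in: wait for every queued work item to finish. */
	for (i = 0; i < n; i++)
		flush_work(&items[i].work);
}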
461/*
462 * Wait for the expedited grace period to elapse, within time limit.
463 * If the time limit is exceeded without the grace period elapsing,
464 * return false, otherwise return true.
465 */
466static bool synchronize_rcu_expedited_wait_once(long tlimit)
467{
468	int t;
469	struct rcu_node *rnp_root = rcu_get_root();
470
471	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
472					  sync_rcu_exp_done_unlocked(rnp_root),
473					  tlimit);
474	// Workqueues should not be signaled.
475	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
476		return true;
477	WARN_ON(t < 0);  /* workqueues should not be signaled. */
478	return false;
479}
480
481/*
482 * Wait for the expedited grace period to elapse, issuing any needed
483 * RCU CPU stall warnings along the way.
484 */
485static void synchronize_rcu_expedited_wait(void)
486{
487	int cpu;
488	unsigned long j;
489	unsigned long jiffies_stall;
490	unsigned long jiffies_start;
491	unsigned long mask;
492	int ndetected;
493	struct rcu_data *rdp;
494	struct rcu_node *rnp;
495	struct rcu_node *rnp_root = rcu_get_root();
496
497	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
498	jiffies_stall = rcu_jiffies_till_stall_check();
499	jiffies_start = jiffies;
500	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
501		if (synchronize_rcu_expedited_wait_once(1))
502			return;
503		rcu_for_each_leaf_node(rnp) {
504			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
505				rdp = per_cpu_ptr(&rcu_data, cpu);
506				if (rdp->rcu_forced_tick_exp)
507					continue;
508				rdp->rcu_forced_tick_exp = true;
509				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
510			}
511		}
512		j = READ_ONCE(jiffies_till_first_fqs);
513		if (synchronize_rcu_expedited_wait_once(j + HZ))
514			return;
515		WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
516	}
517
518	for (;;) {
519		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
520			return;
521		if (rcu_stall_is_suppressed())
522			continue;
523		panic_on_rcu_stall();
524		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
525		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
526		       rcu_state.name);
527		ndetected = 0;
528		rcu_for_each_leaf_node(rnp) {
529			ndetected += rcu_print_task_exp_stall(rnp);
530			for_each_leaf_node_possible_cpu(rnp, cpu) {
531				struct rcu_data *rdp;
532
533				mask = leaf_node_cpu_bit(rnp, cpu);
534				if (!(READ_ONCE(rnp->expmask) & mask))
535					continue;
536				ndetected++;
537				rdp = per_cpu_ptr(&rcu_data, cpu);
538				pr_cont(" %d-%c%c%c", cpu,
539					"O."[!!cpu_online(cpu)],
540					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
541					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
542			}
543		}
544		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
545			jiffies - jiffies_start, rcu_state.expedited_sequence,
546			data_race(rnp_root->expmask),
547			".T"[!!data_race(rnp_root->exp_tasks)]);
548		if (ndetected) {
549			pr_err("blocking rcu_node structures (internal RCU debug):");
550			rcu_for_each_node_breadth_first(rnp) {
551				if (rnp == rnp_root)
552					continue; /* printed unconditionally */
553				if (sync_rcu_exp_done_unlocked(rnp))
554					continue;
555				pr_cont(" l=%u:%d-%d:%#lx/%c",
556					rnp->level, rnp->grplo, rnp->grphi,
557					data_race(rnp->expmask),
558					".T"[!!data_race(rnp->exp_tasks)]);
559			}
560			pr_cont("\n");
561		}
562		rcu_for_each_leaf_node(rnp) {
563			for_each_leaf_node_possible_cpu(rnp, cpu) {
564				mask = leaf_node_cpu_bit(rnp, cpu);
565				if (!(READ_ONCE(rnp->expmask) & mask))
566					continue;
567				dump_cpu_task(cpu);
568			}
569		}
570		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
571	}
572}
573
574/*
575 * Wait for the current expedited grace period to complete, and then
576 * wake up everyone who piggybacked on the just-completed expedited
577 * grace period.  Also update all the ->exp_seq_rq counters as needed
578 * in order to avoid counter-wrap problems.
579 */
580static void rcu_exp_wait_wake(unsigned long s)
581{
582	struct rcu_node *rnp;
583
584	synchronize_rcu_expedited_wait();
585
586	// Switch over to wakeup mode, allowing the next GP to proceed.
587	// End the previous grace period only after acquiring the mutex
588	// to ensure that only one GP runs concurrently with wakeups.
589	mutex_lock(&rcu_state.exp_wake_mutex);
590	rcu_exp_gp_seq_end();
591	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
592
593	rcu_for_each_node_breadth_first(rnp) {
594		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
595			spin_lock(&rnp->exp_lock);
596			/* Recheck, avoid hang in case someone just arrived. */
597			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
598				WRITE_ONCE(rnp->exp_seq_rq, s);
599			spin_unlock(&rnp->exp_lock);
600		}
601		smp_mb(); /* All above changes before wakeup. */
602		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
603	}
604	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
605	mutex_unlock(&rcu_state.exp_wake_mutex);
606}
607
608/*
609 * Common code to drive an expedited grace period forward, used by
610 * workqueues and mid-boot-time tasks.
611 */
612static void rcu_exp_sel_wait_wake(unsigned long s)
613{
614	/* Initialize the rcu_node tree in preparation for the wait. */
615	sync_rcu_exp_select_cpus();
616
617	/* Wait and clean up, including waking everyone. */
618	rcu_exp_wait_wake(s);
619}
620
621/*
622 * Work-queue handler to drive an expedited grace period forward.
623 */
624static void wait_rcu_exp_gp(struct work_struct *wp)
625{
626	struct rcu_exp_work *rewp;
627
628	rewp = container_of(wp, struct rcu_exp_work, rew_work);
629	rcu_exp_sel_wait_wake(rewp->rew_s);
630}
631
632#ifdef CONFIG_PREEMPT_RCU
633
634/*
635 * Remote handler for smp_call_function_single().  If there is an
636 * RCU read-side critical section in effect, request that the
637 * next rcu_read_unlock() record the quiescent state up the
638 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
639 * report the quiescent state.
640 */
641static void rcu_exp_handler(void *unused)
642{
643	int depth = rcu_preempt_depth();
644	unsigned long flags;
645	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
646	struct rcu_node *rnp = rdp->mynode;
647	struct task_struct *t = current;
648
649	/*
650	 * First, the common case of not being in an RCU read-side
651	 * critical section.  If also enabled or idle, immediately
652	 * report the quiescent state, otherwise defer.
653	 */
654	if (!depth) {
655		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
656		    rcu_dynticks_curr_cpu_in_eqs()) {
657			rcu_report_exp_rdp(rdp);
658		} else {
659			rdp->exp_deferred_qs = true;
660			set_tsk_need_resched(t);
661			set_preempt_need_resched();
662		}
663		return;
664	}
665
666	/*
667	 * Second, the less-common case of being in an RCU read-side
668	 * critical section.  In this case we can count on a future
669	 * rcu_read_unlock().  However, this rcu_read_unlock() might
670	 * execute on some other CPU, but in that case there will be
671	 * a future context switch.  Either way, if the expedited
672	 * grace period is still waiting on this CPU, set ->deferred_qs
673	 * so that the eventual quiescent state will be reported.
674	 * Note that there is a large group of race conditions that
675	 * can have caused this quiescent state to already have been
676	 * reported, so we really do need to check ->expmask.
677	 */
678	if (depth > 0) {
679		raw_spin_lock_irqsave_rcu_node(rnp, flags);
680		if (rnp->expmask & rdp->grpmask) {
681			rdp->exp_deferred_qs = true;
682			t->rcu_read_unlock_special.b.exp_hint = true;
683		}
684		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
685		return;
686	}
687
688	// Finally, negative nesting depth should not happen.
689	WARN_ON_ONCE(1);
690}
691
692/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
693static void sync_sched_exp_online_cleanup(int cpu)
694{
695}
696
697/*
698 * Scan the current list of tasks blocked within RCU read-side critical
699 * sections, printing out the tid of each that is blocking the current
700 * expedited grace period.
701 */
702static int rcu_print_task_exp_stall(struct rcu_node *rnp)
703{
704	unsigned long flags;
705	int ndetected = 0;
706	struct task_struct *t;
707
708	if (!READ_ONCE(rnp->exp_tasks))
709		return 0;
710	raw_spin_lock_irqsave_rcu_node(rnp, flags);
711	t = list_entry(rnp->exp_tasks->prev,
712		       struct task_struct, rcu_node_entry);
713	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
714		pr_cont(" P%d", t->pid);
715		ndetected++;
716	}
717	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
718	return ndetected;
719}
720
721#else /* #ifdef CONFIG_PREEMPT_RCU */
722
723/* Request an expedited quiescent state. */
724static void rcu_exp_need_qs(void)
725{
726	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
727	/* Store .exp before .rcu_urgent_qs. */
728	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
729	set_tsk_need_resched(current);
730	set_preempt_need_resched();
731}
732
733/* Invoked on each online non-idle CPU for expedited quiescent state. */
734static void rcu_exp_handler(void *unused)
735{
736	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
737	struct rcu_node *rnp = rdp->mynode;
738
739	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
740	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
741		return;
742	if (rcu_is_cpu_rrupt_from_idle()) {
743		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
744		return;
745	}
746	rcu_exp_need_qs();
747}
748
749/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
750static void sync_sched_exp_online_cleanup(int cpu)
751{
752	unsigned long flags;
753	int my_cpu;
754	struct rcu_data *rdp;
755	int ret;
756	struct rcu_node *rnp;
757
758	rdp = per_cpu_ptr(&rcu_data, cpu);
759	rnp = rdp->mynode;
760	my_cpu = get_cpu();
761	/* Quiescent state either not needed or already requested, leave. */
762	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
763	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
764		put_cpu();
765		return;
766	}
767	/* Quiescent state needed on current CPU, so set it up locally. */
768	if (my_cpu == cpu) {
769		local_irq_save(flags);
770		rcu_exp_need_qs();
771		local_irq_restore(flags);
772		put_cpu();
773		return;
774	}
775	/* Quiescent state needed on some other CPU, send IPI. */
776	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
777	put_cpu();
778	WARN_ON_ONCE(ret);
779}
780
781/*
782 * Because preemptible RCU does not exist, we never have to check for
783 * tasks blocked within RCU read-side critical sections that are
784 * blocking the current expedited grace period.
785 */
786static int rcu_print_task_exp_stall(struct rcu_node *rnp)
787{
788	return 0;
789}
790
791#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
792
793/**
794 * synchronize_rcu_expedited - Brute-force RCU grace period
795 *
796 * Wait for an RCU grace period, but expedite it.  The basic idea is to
797 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
798 * the CPU is in an RCU critical section, and if so, it sets a flag that
799 * causes the outermost rcu_read_unlock() to report the quiescent state
800 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
801 * other hand, if the CPU is not in an RCU read-side critical section,
802 * the IPI handler reports the quiescent state immediately.
803 *
804 * Although this is a great improvement over previous expedited
805 * implementations, it is still unfriendly to real-time workloads, so is
806 * thus not recommended for any sort of common-case code.  In fact, if
807 * you are using synchronize_rcu_expedited() in a loop, please restructure
808 * your code to batch your updates, and then use a single synchronize_rcu()
809 * instead.
810 *
811 * This has the same semantics as (but is more brutal than) synchronize_rcu().
812 */
813void synchronize_rcu_expedited(void)
814{
815	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
816	struct rcu_exp_work rew;
817	struct rcu_node *rnp;
818	unsigned long s;
819
820	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
821			 lock_is_held(&rcu_lock_map) ||
822			 lock_is_held(&rcu_sched_lock_map),
823			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
824
825	/* Is the state such that the call is a grace period? */
826	if (rcu_blocking_is_gp())
827		return;
828
829	/* If expedited grace periods are prohibited, fall back to normal. */
830	if (rcu_gp_is_normal()) {
831		wait_rcu_gp(call_rcu);
832		return;
833	}
834
835	/* Take a snapshot of the sequence number.  */
836	s = rcu_exp_gp_seq_snap();
837	if (exp_funnel_lock(s))
838		return;  /* Someone else did our work for us. */
839
840	/* Ensure that load happens before action based on it. */
841	if (unlikely(boottime)) {
842		/* Direct call during scheduler init and early_initcalls(). */
843		rcu_exp_sel_wait_wake(s);
844	} else {
845		/* Marshall arguments & schedule the expedited grace period. */
846		rew.rew_s = s;
847		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
848		queue_work(rcu_gp_wq, &rew.rew_work);
849	}
850
851	/* Wait for expedited grace period to complete. */
852	rnp = rcu_get_root();
853	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
854		   sync_exp_work_done(s));
855	smp_mb(); /* Workqueue actions happen before return. */
856
857	/* Let the next expedited grace period start. */
858	mutex_unlock(&rcu_state.exp_mutex);
859
860	if (likely(!boottime))
861		destroy_work_on_stack(&rew.rew_work);
862}
863EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
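Finally, a hedged example of the usage pattern the docbook comment describes (hypothetical struct foo and caller; not part of this file): unlink the element so new readers cannot find it, wait expeditedly for pre-existing readers, then free it.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int data;
};

/* Caller holds whatever lock serializes updates to the list. */
static void remove_foo(struct foo *p)
{
	list_del_rcu(&p->list);		/* new readers can no longer find *p */
	synchronize_rcu_expedited();	/* wait out all pre-existing readers */
	kfree(p);			/* now safe: nobody still references *p */
}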