v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  4 *
  5 * membarrier system call
  6 */
  7
  8/*
  9 * For documentation purposes, here are some membarrier ordering
 10 * scenarios to keep in mind:
 11 *
 12 * A) Userspace thread execution after IPI vs membarrier's memory
 13 *    barrier before sending the IPI
 14 *
 15 * Userspace variables:
 16 *
 17 * int x = 0, y = 0;
 18 *
 19 * The memory barrier at the start of membarrier() on CPU0 is necessary in
 20 * order to enforce the guarantee that any writes occurring on CPU0 before
 21 * the membarrier() is executed will be visible to any code executing on
 22 * CPU1 after the IPI-induced memory barrier:
 23 *
 24 *         CPU0                              CPU1
 25 *
 26 *         x = 1
 27 *         membarrier():
 28 *           a: smp_mb()
 29 *           b: send IPI                       IPI-induced mb
 30 *           c: smp_mb()
 31 *         r2 = y
 32 *                                           y = 1
 33 *                                           barrier()
 34 *                                           r1 = x
 35 *
 36 *                     BUG_ON(r1 == 0 && r2 == 0)
 37 *
 38 * The write to y and load from x by CPU1 are unordered by the hardware,
 39 * so it's possible to have "r1 = x" reordered before "y = 1" at any
 40 * point after (b).  If the memory barrier at (a) is omitted, then "x = 1"
 41 * can be reordered after (b) (although not after (c)), so we get r1 == 0
 42 * and r2 == 0.  This violates the guarantee that membarrier() is
 43 * supposed to provide.
 44 *
 45 * The timing of the memory barrier at (a) has to ensure that it executes
 46 * before the IPI-induced memory barrier on CPU1.
 47 *
 48 * B) Userspace thread execution before IPI vs membarrier's memory
 49 *    barrier after completing the IPI
 50 *
 51 * Userspace variables:
 52 *
 53 * int x = 0, y = 0;
 54 *
 55 * The memory barrier at the end of membarrier() on CPU0 is necessary in
 56 * order to enforce the guarantee that any writes occurring on CPU1 before
 57 * the membarrier() is executed will be visible to any code executing on
 58 * CPU0 after the membarrier():
 59 *
 60 *         CPU0                              CPU1
 61 *
 62 *                                           x = 1
 63 *                                           barrier()
 64 *                                           y = 1
 65 *         r2 = y
 66 *         membarrier():
 67 *           a: smp_mb()
 68 *           b: send IPI                       IPI-induced mb
 69 *           c: smp_mb()
 70 *         r1 = x
 71 *         BUG_ON(r1 == 0 && r2 == 1)
 72 *
 73 * The writes to x and y are unordered by the hardware, so it's possible to
 74 * have "r2 = 1" even though the write to x doesn't execute until (b).  If
 75 * the memory barrier at (c) is omitted then "r1 = x" can be reordered
 76 * before (b) (although not before (a)), so we get "r1 = 0".  This violates
 77 * the guarantee that membarrier() is supposed to provide.
 78 *
 79 * The timing of the memory barrier at (c) has to ensure that it executes
 80 * after the IPI-induced memory barrier on CPU1.
 81 *
 82 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
 83 *
 84 *           CPU0                            CPU1
 85 *
 86 *           membarrier():
 87 *           a: smp_mb()
 88 *                                           d: switch to kthread (includes mb)
 89 *           b: read rq->curr->mm == NULL
 90 *                                           e: switch to user (includes mb)
 91 *           c: smp_mb()
 92 *
 93 * Using the scenario from (A), we can show that (a) needs to be paired
 94 * with (e). Using the scenario from (B), we can show that (c) needs to
 95 * be paired with (d).
 96 *
 97 * D) exit_mm vs membarrier
 98 *
 99 * Two thread groups are created, A and B.  Thread group B is created by
100 * issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
101 * Let's assume we have a single thread within each thread group (Thread A
102 * and Thread B).  Thread A runs on CPU0, Thread B runs on CPU1.
103 *
104 *           CPU0                            CPU1
105 *
106 *           membarrier():
107 *             a: smp_mb()
108 *                                           exit_mm():
109 *                                             d: smp_mb()
110 *                                             e: current->mm = NULL
111 *             b: read rq->curr->mm == NULL
112 *             c: smp_mb()
113 *
114 * Using scenario (B), we can show that (c) needs to be paired with (d).
115 *
116 * E) kthread_{use,unuse}_mm vs membarrier
117 *
118 *           CPU0                            CPU1
119 *
120 *           membarrier():
121 *           a: smp_mb()
122 *                                           kthread_unuse_mm()
123 *                                             d: smp_mb()
124 *                                             e: current->mm = NULL
125 *           b: read rq->curr->mm == NULL
126 *                                           kthread_use_mm()
127 *                                             f: current->mm = mm
128 *                                             g: smp_mb()
129 *           c: smp_mb()
130 *
131 * Using the scenario from (A), we can show that (a) needs to be paired
132 * with (g). Using the scenario from (B), we can show that (c) needs to
133 * be paired with (d).
134 */
135
136/*
137 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
138 * except MEMBARRIER_CMD_QUERY.
139 */
140#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
141#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK			\
142	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE			\
143	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
144#else
145#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
146#endif
147
148#ifdef CONFIG_RSEQ
149#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK		\
150	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			\
151	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
152#else
153#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK	0
154#endif
155
156#define MEMBARRIER_CMD_BITMASK						\
157	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
158	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
159	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
160	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
161	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
162	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK			\
163	| MEMBARRIER_CMD_GET_REGISTRATIONS)
164
165static DEFINE_MUTEX(membarrier_ipi_mutex);
166#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
167
168static void ipi_mb(void *info)
169{
170	smp_mb();	/* IPIs should be serializing but paranoid. */
171}
172
173static void ipi_sync_core(void *info)
174{
175	/*
176	 * The smp_mb() in membarrier after all the IPIs is supposed to
177	 * ensure that memory accesses on remote CPUs that occur before the IPI
178	 * become visible to membarrier()'s caller -- see scenario B in
179	 * the big comment at the top of this file.
180	 *
181	 * A sync_core() would provide this guarantee, but
182	 * sync_core_before_usermode() might end up being deferred until
183	 * after membarrier()'s smp_mb().
184	 */
185	smp_mb();	/* IPIs should be serializing but paranoid. */
186
187	sync_core_before_usermode();
188}
189
190static void ipi_rseq(void *info)
191{
192	/*
193	 * Ensure that all stores done by the calling thread are visible
194	 * to the current task before the current task resumes.  We could
195	 * probably optimize this away on most architectures, but by the
196	 * time we've already sent an IPI, the cost of the extra smp_mb()
197	 * is negligible.
198	 */
199	smp_mb();
200	rseq_preempt(current);
201}
202
203static void ipi_sync_rq_state(void *info)
204{
205	struct mm_struct *mm = (struct mm_struct *) info;
206
207	if (current->mm != mm)
208		return;
209	this_cpu_write(runqueues.membarrier_state,
210		       atomic_read(&mm->membarrier_state));
211	/*
212	 * Issue a memory barrier after setting
213	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
214	 * guarantee that no memory access following registration is reordered
215	 * before registration.
216	 */
217	smp_mb();
218}
219
220void membarrier_exec_mmap(struct mm_struct *mm)
221{
222	/*
223	 * Issue a memory barrier before clearing membarrier_state to
224	 * guarantee that no memory access prior to exec is reordered after
225	 * clearing this state.
226	 */
227	smp_mb();
228	atomic_set(&mm->membarrier_state, 0);
229	/*
230	 * Keep the runqueue membarrier_state in sync with this mm
231	 * membarrier_state.
232	 */
233	this_cpu_write(runqueues.membarrier_state, 0);
234}
235
236void membarrier_update_current_mm(struct mm_struct *next_mm)
237{
238	struct rq *rq = this_rq();
239	int membarrier_state = 0;
240
241	if (next_mm)
242		membarrier_state = atomic_read(&next_mm->membarrier_state);
243	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
244		return;
245	WRITE_ONCE(rq->membarrier_state, membarrier_state);
246}
247
248static int membarrier_global_expedited(void)
249{
250	int cpu;
251	cpumask_var_t tmpmask;
252
253	if (num_online_cpus() == 1)
254		return 0;
255
256	/*
257	 * Matches memory barriers after rq->curr modification in
258	 * scheduler.
259	 */
260	smp_mb();	/* system call entry is not a mb. */
261
262	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
263		return -ENOMEM;
264
265	SERIALIZE_IPI();
266	cpus_read_lock();
267	rcu_read_lock();
268	for_each_online_cpu(cpu) {
269		struct task_struct *p;
270
271		/*
272		 * Skipping the current CPU is OK even though we can be
273		 * migrated at any point. The current CPU, at the point
274		 * where we read raw_smp_processor_id(), is ensured to
275		 * be in program order with respect to the caller
276		 * thread. Therefore, we can skip this CPU from the
277		 * iteration.
278		 */
279		if (cpu == raw_smp_processor_id())
280			continue;
281
282		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
283		    MEMBARRIER_STATE_GLOBAL_EXPEDITED))
284			continue;
285
286		/*
287		 * Skip the CPU if it runs a kernel thread which is not using
288		 * a task mm.
289		 */
290		p = rcu_dereference(cpu_rq(cpu)->curr);
291		if (!p->mm)
292			continue;
293
294		__cpumask_set_cpu(cpu, tmpmask);
295	}
296	rcu_read_unlock();
297
298	preempt_disable();
299	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
300	preempt_enable();
301
302	free_cpumask_var(tmpmask);
303	cpus_read_unlock();
304
305	/*
306	 * Memory barrier on the caller thread _after_ we finished
307	 * waiting for the last IPI. Matches memory barriers before
308	 * rq->curr modification in scheduler.
309	 */
310	smp_mb();	/* exit from system call is not a mb */
311	return 0;
312}
313
314static int membarrier_private_expedited(int flags, int cpu_id)
315{
316	cpumask_var_t tmpmask;
317	struct mm_struct *mm = current->mm;
318	smp_call_func_t ipi_func = ipi_mb;
319
320	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
321		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
322			return -EINVAL;
323		if (!(atomic_read(&mm->membarrier_state) &
324		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
325			return -EPERM;
326		ipi_func = ipi_sync_core;
327		prepare_sync_core_cmd(mm);
328	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
329		if (!IS_ENABLED(CONFIG_RSEQ))
330			return -EINVAL;
331		if (!(atomic_read(&mm->membarrier_state) &
332		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
333			return -EPERM;
334		ipi_func = ipi_rseq;
335	} else {
336		WARN_ON_ONCE(flags);
337		if (!(atomic_read(&mm->membarrier_state) &
338		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
339			return -EPERM;
340	}
341
342	if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
343	    (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
344		return 0;
345
346	/*
347	 * Matches memory barriers after rq->curr modification in
348	 * scheduler.
349	 *
350	 * On RISC-V, this barrier pairing is also needed for the
351	 * SYNC_CORE command when switching between processes, cf.
352	 * the inline comments in membarrier_arch_switch_mm().
353	 */
354	smp_mb();	/* system call entry is not a mb. */
355
356	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
357		return -ENOMEM;
358
359	SERIALIZE_IPI();
360	cpus_read_lock();
361
362	if (cpu_id >= 0) {
363		struct task_struct *p;
364
365		if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
366			goto out;
367		rcu_read_lock();
368		p = rcu_dereference(cpu_rq(cpu_id)->curr);
369		if (!p || p->mm != mm) {
370			rcu_read_unlock();
371			goto out;
372		}
373		rcu_read_unlock();
374	} else {
375		int cpu;
376
377		rcu_read_lock();
378		for_each_online_cpu(cpu) {
379			struct task_struct *p;
380
381			p = rcu_dereference(cpu_rq(cpu)->curr);
382			if (p && p->mm == mm)
383				__cpumask_set_cpu(cpu, tmpmask);
384		}
385		rcu_read_unlock();
386	}
387
388	if (cpu_id >= 0) {
389		/*
390		 * smp_call_function_single() will call ipi_func() if cpu_id
391		 * is the calling CPU.
392		 */
393		smp_call_function_single(cpu_id, ipi_func, NULL, 1);
394	} else {
395		/*
396		 * For regular membarrier, we can save a few cycles by
397		 * skipping the current cpu -- we're about to do smp_mb()
398		 * below, and if we migrate to a different cpu, this cpu
399		 * and the new cpu will execute a full barrier in the
400		 * scheduler.
401		 *
402		 * For SYNC_CORE, we do need a barrier on the current cpu --
403		 * otherwise, if we are migrated and replaced by a different
404		 * task in the same mm just before, during, or after
405		 * membarrier, we will end up with some thread in the mm
406		 * running without a core sync.
407		 *
408		 * For RSEQ, don't rseq_preempt() the caller.  User code
409		 * is not supposed to issue syscalls at all from inside an
410		 * rseq critical section.
411		 */
412		if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
413			preempt_disable();
414			smp_call_function_many(tmpmask, ipi_func, NULL, true);
415			preempt_enable();
416		} else {
417			on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
418		}
419	}
420
421out:
422	if (cpu_id < 0)
423		free_cpumask_var(tmpmask);
424	cpus_read_unlock();
425
426	/*
427	 * Memory barrier on the caller thread _after_ we finished
428	 * waiting for the last IPI. Matches memory barriers before
429	 * rq->curr modification in scheduler.
430	 */
431	smp_mb();	/* exit from system call is not a mb */
432
433	return 0;
434}
435
436static int sync_runqueues_membarrier_state(struct mm_struct *mm)
437{
438	int membarrier_state = atomic_read(&mm->membarrier_state);
439	cpumask_var_t tmpmask;
440	int cpu;
441
442	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
443		this_cpu_write(runqueues.membarrier_state, membarrier_state);
444
445		/*
446		 * For single mm user, we can simply issue a memory barrier
447		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
448		 * mm and in the current runqueue to guarantee that no memory
449		 * access following registration is reordered before
450		 * registration.
451		 */
452		smp_mb();
453		return 0;
454	}
455
456	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
457		return -ENOMEM;
458
459	/*
460	 * For mm with multiple users, we need to ensure all future
461	 * scheduler executions will observe @mm's new membarrier
462	 * state.
463	 */
464	synchronize_rcu();
465
466	/*
467	 * For each cpu runqueue, if the task's mm matches @mm, ensure that all
468	 * @mm's membarrier state set bits are also set in the runqueue's
469	 * membarrier state. This ensures that a runqueue scheduling
470	 * between threads which are users of @mm has its membarrier state
471	 * updated.
472	 */
473	SERIALIZE_IPI();
474	cpus_read_lock();
475	rcu_read_lock();
476	for_each_online_cpu(cpu) {
477		struct rq *rq = cpu_rq(cpu);
478		struct task_struct *p;
479
480		p = rcu_dereference(rq->curr);
481		if (p && p->mm == mm)
482			__cpumask_set_cpu(cpu, tmpmask);
483	}
484	rcu_read_unlock();
485
486	on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);
487
488	free_cpumask_var(tmpmask);
489	cpus_read_unlock();
490
491	return 0;
492}
493
494static int membarrier_register_global_expedited(void)
495{
496	struct task_struct *p = current;
497	struct mm_struct *mm = p->mm;
498	int ret;
499
500	if (atomic_read(&mm->membarrier_state) &
501	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
502		return 0;
503	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
504	ret = sync_runqueues_membarrier_state(mm);
505	if (ret)
506		return ret;
507	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
508		  &mm->membarrier_state);
509
510	return 0;
511}
512
513static int membarrier_register_private_expedited(int flags)
514{
515	struct task_struct *p = current;
516	struct mm_struct *mm = p->mm;
517	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
518	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
519	    ret;
520
521	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
522		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
523			return -EINVAL;
524		ready_state =
525			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
526	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
527		if (!IS_ENABLED(CONFIG_RSEQ))
528			return -EINVAL;
529		ready_state =
530			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY;
531	} else {
532		WARN_ON_ONCE(flags);
533	}
534
535	/*
536	 * We need to consider threads belonging to different thread
537	 * groups, which use the same mm. (CLONE_VM but not
538	 * CLONE_THREAD).
539	 */
540	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
541		return 0;
542	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
543		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
544	if (flags & MEMBARRIER_FLAG_RSEQ)
545		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ;
546	atomic_or(set_state, &mm->membarrier_state);
547	ret = sync_runqueues_membarrier_state(mm);
548	if (ret)
549		return ret;
550	atomic_or(ready_state, &mm->membarrier_state);
551
552	return 0;
553}
554
555static int membarrier_get_registrations(void)
556{
557	struct task_struct *p = current;
558	struct mm_struct *mm = p->mm;
559	int registrations_mask = 0, membarrier_state, i;
560	static const int states[] = {
561		MEMBARRIER_STATE_GLOBAL_EXPEDITED |
562			MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
563		MEMBARRIER_STATE_PRIVATE_EXPEDITED |
564			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
565		MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE |
566			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY,
567		MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ |
568			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY
569	};
570	static const int registration_cmds[] = {
571		MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
572		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
573		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
574		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ
575	};
576	BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds));
577
578	membarrier_state = atomic_read(&mm->membarrier_state);
579	for (i = 0; i < ARRAY_SIZE(states); ++i) {
580		if (membarrier_state & states[i]) {
581			registrations_mask |= registration_cmds[i];
582			membarrier_state &= ~states[i];
583		}
584	}
585	WARN_ON_ONCE(membarrier_state != 0);
586	return registrations_mask;
587}
588
589/**
590 * sys_membarrier - issue memory barriers on a set of threads
591 * @cmd:    Takes command values defined in enum membarrier_cmd.
592 * @flags:  Currently needs to be 0 for all commands other than
593 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
594 *          case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
595 *          contains the CPU on which to interrupt (= restart)
596 *          the RSEQ critical section.
597 * @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
598 *          RSEQ CS should be interrupted (@cmd must be
599 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
600 *
601 * If this system call is not implemented, -ENOSYS is returned. If the
602 * command specified does not exist, not available on the running
603 * kernel, or if the command argument is invalid, this system call
604 * returns -EINVAL. For a given command, with flags argument set to 0,
605 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
606 * always return the same value until reboot. In addition, it can return
607 * -ENOMEM if there is not enough memory available to perform the system
608 * call.
609 *
610 * All memory accesses performed in program order from each targeted thread
611 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
612 * the semantic "barrier()" to represent a compiler barrier forcing memory
613 * accesses to be performed in program order across the barrier, and
614 * smp_mb() to represent explicit memory barriers forcing full memory
615 * ordering across the barrier, we have the following ordering table for
616 * each pair of barrier(), sys_membarrier() and smp_mb():
617 *
618 * The pair ordering is detailed as (O: ordered, X: not ordered):
619 *
620 *                        barrier()   smp_mb() sys_membarrier()
621 *        barrier()          X           X            O
622 *        smp_mb()           X           O            O
623 *        sys_membarrier()   O           O            O
624 */
625SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
626{
627	switch (cmd) {
628	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
629		if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU))
630			return -EINVAL;
631		break;
632	default:
633		if (unlikely(flags))
634			return -EINVAL;
635	}
636
637	if (!(flags & MEMBARRIER_CMD_FLAG_CPU))
638		cpu_id = -1;
639
640	switch (cmd) {
641	case MEMBARRIER_CMD_QUERY:
642	{
643		int cmd_mask = MEMBARRIER_CMD_BITMASK;
644
645		if (tick_nohz_full_enabled())
646			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
647		return cmd_mask;
648	}
649	case MEMBARRIER_CMD_GLOBAL:
650		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
651		if (tick_nohz_full_enabled())
652			return -EINVAL;
653		if (num_online_cpus() > 1)
654			synchronize_rcu();
655		return 0;
656	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
657		return membarrier_global_expedited();
658	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
659		return membarrier_register_global_expedited();
660	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
661		return membarrier_private_expedited(0, cpu_id);
662	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
663		return membarrier_register_private_expedited(0);
664	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
665		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id);
666	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
667		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
668	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
669		return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
670	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
671		return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
672	case MEMBARRIER_CMD_GET_REGISTRATIONS:
673		return membarrier_get_registrations();
674	default:
675		return -EINVAL;
676	}
677}
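
The listing above ends at the syscall entry point, so as an editorial illustration (not part of the kernel file): userspace reaches sys_membarrier() through syscall(2), since glibc provides no dedicated wrapper. The sketch below shows the typical private-expedited pattern: query the command mask, register once per process, then issue the barrier on the slow path. The membarrier() helper is a hypothetical wrapper used only in this example; the command constants come from <linux/membarrier.h>, and the three-argument call assumes a kernel with the SYSCALL_DEFINE3() signature shown above.

#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical wrapper mirroring SYSCALL_DEFINE3(membarrier, cmd, flags, cpu_id). */
static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	int cmds = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

	if (cmds < 0 || !(cmds & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
		fprintf(stderr, "private expedited membarrier unsupported\n");
		return 1;
	}

	/*
	 * Register first: membarrier_private_expedited() returns -EPERM
	 * until MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY is set for this mm.
	 */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		return 1;

	/* ... slow path: order prior accesses against every running thread ... */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
		return 1;

	return 0;
}
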
v4.17
 
  1/*
  2 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  3 *
  4 * membarrier system call
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or
  9 * (at your option) any later version.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 */
 16#include "sched.h"
 17
 18/*
 19 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
 20 * except MEMBARRIER_CMD_QUERY.
 21 */
 22#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
 23#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK			\
 24	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE			\
 25	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
 26#else
 27#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
 28#endif
 29
 30#define MEMBARRIER_CMD_BITMASK						\
 31	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
 32	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
 33	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
 34	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
 35	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
 36
 37static void ipi_mb(void *info)
 38{
 39	smp_mb();	/* IPIs should be serializing but paranoid. */
 40}
 41
 42static int membarrier_global_expedited(void)
 43{
 44	int cpu;
 45	bool fallback = false;
 46	cpumask_var_t tmpmask;
 47
 48	if (num_online_cpus() == 1)
 49		return 0;
 50
 51	/*
 52	 * Matches memory barriers around rq->curr modification in
 53	 * scheduler.
 54	 */
 55	smp_mb();	/* system call entry is not a mb. */
 56
 57	/*
 58	 * Expedited membarrier commands guarantee that they won't
 59	 * block, hence the GFP_NOWAIT allocation flag and fallback
 60	 * implementation.
 61	 */
 62	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
 63		/* Fallback for OOM. */
 64		fallback = true;
 65	}
 66
 67	cpus_read_lock();
 68	for_each_online_cpu(cpu) {
 69		struct task_struct *p;
 70
 71		/*
 72		 * Skipping the current CPU is OK even though we can be
 73		 * migrated at any point. The current CPU, at the point
 74		 * where we read raw_smp_processor_id(), is ensured to
 75		 * be in program order with respect to the caller
 76		 * thread. Therefore, we can skip this CPU from the
 77		 * iteration.
 78		 */
 79		if (cpu == raw_smp_processor_id())
 80			continue;
 81
 82		rcu_read_lock();
 83		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
 84		if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
 85				   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
 86			if (!fallback)
 87				__cpumask_set_cpu(cpu, tmpmask);
 88			else
 89				smp_call_function_single(cpu, ipi_mb, NULL, 1);
 90		}
 91		rcu_read_unlock();
 92	}
 93	if (!fallback) {
 94		preempt_disable();
 95		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
 96		preempt_enable();
 97		free_cpumask_var(tmpmask);
 98	}
 99	cpus_read_unlock();
100
101	/*
102	 * Memory barrier on the caller thread _after_ we finished
103	 * waiting for the last IPI. Matches memory barriers around
104	 * rq->curr modification in scheduler.
105	 */
106	smp_mb();	/* exit from system call is not a mb */
107	return 0;
108}
109
110static int membarrier_private_expedited(int flags)
111{
112	int cpu;
113	bool fallback = false;
114	cpumask_var_t tmpmask;
115
116	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
117		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
118			return -EINVAL;
119		if (!(atomic_read(&current->mm->membarrier_state) &
120		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
121			return -EPERM;
122	} else {
123		if (!(atomic_read(&current->mm->membarrier_state) &
124		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
125			return -EPERM;
126	}
127
128	if (num_online_cpus() == 1)
129		return 0;
130
131	/*
132	 * Matches memory barriers around rq->curr modification in
133	 * scheduler.
134	 */
135	smp_mb();	/* system call entry is not a mb. */
136
137	/*
138	 * Expedited membarrier commands guarantee that they won't
139	 * block, hence the GFP_NOWAIT allocation flag and fallback
140	 * implementation.
141	 */
142	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
143		/* Fallback for OOM. */
144		fallback = true;
145	}
146
147	cpus_read_lock();
148	for_each_online_cpu(cpu) {
149		struct task_struct *p;
150
151		/*
152		 * Skipping the current CPU is OK even though we can be
153		 * migrated at any point. The current CPU, at the point
154		 * where we read raw_smp_processor_id(), is ensured to
155		 * be in program order with respect to the caller
156		 * thread. Therefore, we can skip this CPU from the
157		 * iteration.
158		 */
159		if (cpu == raw_smp_processor_id())
160			continue;
161		rcu_read_lock();
162		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
163		if (p && p->mm == current->mm) {
164			if (!fallback)
165				__cpumask_set_cpu(cpu, tmpmask);
166			else
167				smp_call_function_single(cpu, ipi_mb, NULL, 1);
168		}
169		rcu_read_unlock();
170	}
171	if (!fallback) {
172		preempt_disable();
173		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
174		preempt_enable();
175		free_cpumask_var(tmpmask);
176	}
177	cpus_read_unlock();
178
179	/*
180	 * Memory barrier on the caller thread _after_ we finished
181	 * waiting for the last IPI. Matches memory barriers around
182	 * rq->curr modification in scheduler.
183	 */
184	smp_mb();	/* exit from system call is not a mb */
185
186	return 0;
187}
188
189static int membarrier_register_global_expedited(void)
190{
191	struct task_struct *p = current;
192	struct mm_struct *mm = p->mm;
193
194	if (atomic_read(&mm->membarrier_state) &
195	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
196		return 0;
197	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
198	if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
199		/*
200		 * For single mm user, single threaded process, we can
201		 * simply issue a memory barrier after setting
202		 * MEMBARRIER_STATE_GLOBAL_EXPEDITED to guarantee that
203		 * no memory access following registration is reordered
204		 * before registration.
205		 */
206		smp_mb();
207	} else {
208		/*
209		 * For multi-mm user threads, we need to ensure all
210		 * future scheduler executions will observe the new
211		 * thread flag state for this mm.
212		 */
213		synchronize_sched();
214	}
215	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
216		  &mm->membarrier_state);
217
218	return 0;
219}
220
221static int membarrier_register_private_expedited(int flags)
222{
223	struct task_struct *p = current;
224	struct mm_struct *mm = p->mm;
225	int state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY;
226
227	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
228		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
229			return -EINVAL;
230		state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
231	}
232
233	/*
234	 * We need to consider threads belonging to different thread
235	 * groups, which use the same mm. (CLONE_VM but not
236	 * CLONE_THREAD).
237	 */
238	if (atomic_read(&mm->membarrier_state) & state)
239		return 0;
240	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
241	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
242		atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
243			  &mm->membarrier_state);
244	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
245		/*
246		 * Ensure all future scheduler executions will observe the
247		 * new thread flag state for this process.
248		 */
249		synchronize_sched();
250	}
251	atomic_or(state, &mm->membarrier_state);
252
253	return 0;
254}
255
256/**
257 * sys_membarrier - issue memory barriers on a set of threads
258 * @cmd:   Takes command values defined in enum membarrier_cmd.
259 * @flags: Currently needs to be 0. For future extensions.
260 *
261 * If this system call is not implemented, -ENOSYS is returned. If the
262 * command specified does not exist, not available on the running
263 * kernel, or if the command argument is invalid, this system call
264 * returns -EINVAL. For a given command, with flags argument set to 0,
265 * this system call is guaranteed to always return the same value until
266 * reboot.
267 *
268 * All memory accesses performed in program order from each targeted thread
269 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
270 * the semantic "barrier()" to represent a compiler barrier forcing memory
271 * accesses to be performed in program order across the barrier, and
272 * smp_mb() to represent explicit memory barriers forcing full memory
273 * ordering across the barrier, we have the following ordering table for
274 * each pair of barrier(), sys_membarrier() and smp_mb():
275 *
276 * The pair ordering is detailed as (O: ordered, X: not ordered):
277 *
278 *                        barrier()   smp_mb() sys_membarrier()
279 *        barrier()          X           X            O
280 *        smp_mb()           X           O            O
281 *        sys_membarrier()   O           O            O
282 */
283SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
284{
285	if (unlikely(flags))
286		return -EINVAL;
287	switch (cmd) {
288	case MEMBARRIER_CMD_QUERY:
289	{
290		int cmd_mask = MEMBARRIER_CMD_BITMASK;
291
292		if (tick_nohz_full_enabled())
293			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
294		return cmd_mask;
295	}
296	case MEMBARRIER_CMD_GLOBAL:
297		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
298		if (tick_nohz_full_enabled())
299			return -EINVAL;
300		if (num_online_cpus() > 1)
301			synchronize_sched();
302		return 0;
303	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
304		return membarrier_global_expedited();
305	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
306		return membarrier_register_global_expedited();
307	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
308		return membarrier_private_expedited(0);
309	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
310		return membarrier_register_private_expedited(0);
311	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
312		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
313	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
314		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
315	default:
316		return -EINVAL;
317	}
318}
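
To make the ordering table concrete (barrier() on one side pairs with sys_membarrier() on the other), here is a hedged userspace sketch of scenario (A) from the v6.13.7 header comment: the fast-path thread separates its store and load with only a compiler barrier, while the slow-path thread issues membarrier() in between. The fast_path()/slow_path() names and the plain volatile variables are illustrative only, mirroring the pseudo-code in the comment rather than production C11 atomics, and the process is assumed to have registered MEMBARRIER_CMD_PRIVATE_EXPEDITED beforehand, as in the earlier example.

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile int x, y;	/* both initially 0, as in the scenarios */

/* One thread: only a compiler barrier, the "barrier()" column of the table. */
static void fast_path(int *r1)
{
	y = 1;
	__asm__ __volatile__("" ::: "memory");	/* barrier() */
	*r1 = x;
}

/* Another thread of the same process: membarrier() supplies the smp_mb()s. */
static void slow_path(int *r2)
{
	x = 1;
	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
	*r2 = y;
}

/*
 * Run concurrently, the outcome r1 == 0 && r2 == 0 is forbidden -- the
 * condition asserted by BUG_ON(r1 == 0 && r2 == 0) in scenario (A).
 */
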