v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

void flush_icache_all(void)
{
	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j'), we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
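
/*
 * Editor's note: the deferred flush referenced above lives in
 * arch/riscv/mm/context.c.  A minimal sketch of that counterpart (not part
 * of this file; details may differ between kernel versions):
 *
 *	static inline void flush_icache_deferred(struct mm_struct *mm)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *		cpumask_t *mask = &mm->context.icache_stale_mask;
 *
 *		if (cpumask_test_cpu(cpu, mask)) {
 *			cpumask_clear_cpu(cpu, mask);
 *			smp_mb();	// pairs with the barrier above
 *			local_flush_icache_all();
 *		}
 *	}
 *
 * switch_mm() runs this before execution resumes in the new context, which
 * is what lets flush_icache_mm() skip the IPI for harts not running this MM.
 */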

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

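	/*
	 * Editor's note: PG_dcache_clean doubles here as an "icache in sync"
	 * flag, so the global flush below runs at most once per page until
	 * the flag is cleared again (e.g. when the page is freed and reused).
	 */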
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

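/*
 * Editor's note: riscv_use_ipi_for_rfence(), used above and below, reports
 * (as of this version) whether the platform delivers remote fences through
 * ordinary IPIs rather than SBI calls; when it does, the IPI path is taken
 * even with CONFIG_RISCV_SBI enabled.
 */
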
/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j'), we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
#endif /* CONFIG_MMU */

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);

static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}
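
/*
 * Editor's note: a minimal sketch of the device-tree input parsed above
 * (the property names come from this file; the values are illustrative
 * only, 64 bytes being a common block size):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "riscv";
 *		riscv,cbom-block-size = <64>;
 *		riscv,cboz-block-size = <64>;
 *	};
 */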

void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom and/or cboz extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}
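
/*
 * Editor's note: these exported sizes feed the Zicbom/Zicboz cache-block
 * operations elsewhere in arch/riscv (for instance, the noncoherent-DMA
 * helpers step through buffers in riscv_cbom_block_size units); the exact
 * consumers vary by kernel version.
 */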