v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Based upon linux/arch/m68k/mm/sun3mmu.c
  4 * Based upon linux/arch/ppc/mm/mmu_context.c
  5 *
  6 * Implementations of mm routines specific to the Coldfire MMU.
  7 *
  8 * Copyright (c) 2008 Freescale Semiconductor, Inc.
  9 */
 10
 11#include <linux/kernel.h>
 12#include <linux/types.h>
 13#include <linux/mm.h>
 14#include <linux/init.h>
 15#include <linux/string.h>
 16#include <linux/memblock.h>
 17
 18#include <asm/setup.h>
 19#include <asm/page.h>
 20#include <asm/mmu_context.h>
 21#include <asm/mcf_pgalloc.h>
 22#include <asm/tlbflush.h>
 23#include <asm/pgalloc.h>
 24
 25#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
 26
 27mm_context_t next_mmu_context;
 28unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 29atomic_t nr_free_contexts;
 30struct mm_struct *context_mm[LAST_CONTEXT+1];
 31unsigned long num_pages;
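/*
 * KMAPAREA() matches addresses in the kernel mapping window
 * [VMALLOC_START, KMAP_END), i.e. vmalloc/kmap space that is resolved
 * through init_mm rather than a user mm.
 *
 * The globals above back the simple ASID ("context") allocator used
 * together with the helpers in asm/mmu_context.h: context_map is a
 * bitmap of contexts in use, nr_free_contexts counts the spares,
 * next_mmu_context is the round-robin hint for the next allocation (and
 * for steal_context()), context_mm[] records which mm owns each context,
 * and num_pages is the RAM page count computed in cf_bootmem_alloc() and
 * consumed by paging_init().
 */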
 32
 33/*
 34 * ColdFire paging_init derived from sun3.
 35 */
 36void __init paging_init(void)
 37{
 38	pgd_t *pg_dir;
 39	pte_t *pg_table;
 40	unsigned long address, size;
 41	unsigned long next_pgtable, bootmem_end;
 42	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 43	int i;
 44
 45	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 46	if (!empty_zero_page)
 47		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 48		      __func__, PAGE_SIZE, PAGE_SIZE);
 49
 50	pg_dir = swapper_pg_dir;
 51	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 52
 53	size = num_pages * sizeof(pte_t);
 54	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 55	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
 56	if (!next_pgtable)
 57		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 58		      __func__, size, PAGE_SIZE);
 59
 60	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 61	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
 62
 63	address = PAGE_OFFSET;
 64	while (address < (unsigned long)high_memory) {
 65		pg_table = (pte_t *) next_pgtable;
 66		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
 67		pgd_val(*pg_dir) = (unsigned long) pg_table;
 68		pg_dir++;
 69
 70		/* now change pg_table to kernel virtual addresses */
 71		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
 72			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
 73			if (address >= (unsigned long) high_memory)
 74				pte_val(pte) = 0;
 75
 76			set_pte(pg_table, pte);
 77			address += PAGE_SIZE;
 78		}
 79	}
 80
 81	current->mm = NULL;
 82	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
 83	free_area_init(max_zone_pfn);
 84}
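/*
 * paging_init() direct-maps [PAGE_OFFSET, high_memory) with one freshly
 * allocated PTE table per pgd slot and clears the entries that fall
 * beyond high_memory.  Note that the sizing at line 54 rounds the PTE
 * array up to a page boundary and, when the size is already aligned,
 * still adds one spare page (size + PAGE_SIZE masked, rather than
 * PAGE_ALIGN(size)).
 */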
 85
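/*
 * cf_tlb_miss() reloads a missing TLB entry in software.  It resolves
 * the faulting address (MMUAR for data misses, the PC plus extension
 * word for instruction misses) through the page tables of init_mm for
 * the kernel KMAP area or current->mm otherwise, marks the PTE young
 * (and dirty on a permitted write), then programs MMUTR (virtual page |
 * ASID | valid) and MMUDR (physical page | attributes | 8KB | execute)
 * and commits the entry to the I- or D-TLB through MMUOR.  It returns 0
 * on success and -1 when no usable mapping exists.
 */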
 86int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 87{
 88	unsigned long flags, mmuar, mmutr;
 89	struct mm_struct *mm;
 90	pgd_t *pgd;
 91	p4d_t *p4d;
 92	pud_t *pud;
 93	pmd_t *pmd;
 94	pte_t *pte;
 95	int asid;
 96
 97	local_irq_save(flags);
 98
 99	mmuar = (dtlb) ? mmu_read(MMUAR) :
100		regs->pc + (extension_word * sizeof(long));
101
102	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
103	if (!mm) {
104		local_irq_restore(flags);
105		return -1;
106	}
107
108	pgd = pgd_offset(mm, mmuar);
109	if (pgd_none(*pgd))  {
110		local_irq_restore(flags);
111		return -1;
112	}
113
114	p4d = p4d_offset(pgd, mmuar);
115	if (p4d_none(*p4d)) {
116		local_irq_restore(flags);
117		return -1;
118	}
119
120	pud = pud_offset(p4d, mmuar);
121	if (pud_none(*pud)) {
122		local_irq_restore(flags);
123		return -1;
124	}
125
126	pmd = pmd_offset(pud, mmuar);
127	if (pmd_none(*pmd)) {
128		local_irq_restore(flags);
129		return -1;
130	}
131
132	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
133				: pte_offset_map(pmd, mmuar);
134	if (pte_none(*pte) || !pte_present(*pte)) {
135		local_irq_restore(flags);
136		return -1;
137	}
138
139	if (write) {
140		if (!pte_write(*pte)) {
141			local_irq_restore(flags);
142			return -1;
143		}
144		set_pte(pte, pte_mkdirty(*pte));
145	}
146
147	set_pte(pte, pte_mkyoung(*pte));
148	asid = mm->context & 0xff;
149	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
150		set_pte(pte, pte_wrprotect(*pte));
151
152	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
153	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
154		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
155	mmu_write(MMUTR, mmutr);
156
157	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
158		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
159
160	if (dtlb)
161		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
162	else
163		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
164
165	local_irq_restore(flags);
166	return 0;
167}
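/*
 * Example walk-through (A is an arbitrary user data address): a store to
 * A misses in the DTLB, the hardware latches A in MMUAR, and the
 * exception path calls cf_tlb_miss(regs, 1, 1, 0) (write access, data
 * TLB).  The routine walks current->mm's tables to the PTE, marks it
 * dirty and young, writes MMUTR = (A & PAGE_MASK) | (asid << MMUTR_IDN)
 * | MMUTR_V and MMUDR = (phys & PAGE_MASK) | attrs | MMUDR_SZ_8KB |
 * MMUDR_X, and MMUOR_ACC | MMUOR_UAA then loads the new DTLB entry so
 * the store can be retried.
 */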
168
169void __init cf_bootmem_alloc(void)
170{
171	unsigned long memstart;
172
173	/* _rambase and _ramend will be naturally page aligned */
174	m68k_memory[0].addr = _rambase;
175	m68k_memory[0].size = _ramend - _rambase;
176
177	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
178			  MEMBLOCK_NONE);
179
180	/* compute total pages in system */
181	num_pages = PFN_DOWN(_ramend - _rambase);
182
183	/* page numbers */
184	memstart = PAGE_ALIGN(_ramstart);
185	min_low_pfn = PFN_DOWN(_rambase);
186	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
187	high_memory = (void *)_ramend;
188
189	/* Reserve kernel text/data/bss */
190	memblock_reserve(_rambase, memstart - _rambase);
191
192	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
193	module_fixup(NULL, __start_fixup, __stop_fixup);
194
195	/* setup node data */
196	m68k_setup_node(0);
197}
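/*
 * Example (hypothetical layout): with _rambase = 0x40000000 and
 * _ramend = 0x44000000 the single memblock node spans 64MB, num_pages
 * covers that whole range, high_memory ends up at 0x44000000, and the
 * kernel image from _rambase up to PAGE_ALIGN(_ramstart) is reserved in
 * memblock so the page allocator never hands it out.
 */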
198
199/*
200 * Initialize the context management stuff.
201 * The following was taken from arch/ppc/mmu_context.c
202 */
203void __init cf_mmu_context_init(void)
204{
205	/*
206	 * Some processors have too few contexts to reserve one for
207	 * init_mm, and require using context 0 for a normal task.
208	 * Other processors reserve the use of context zero for the kernel.
209	 * This code assumes FIRST_CONTEXT < 32.
210	 */
211	context_map[0] = (1 << FIRST_CONTEXT) - 1;
212	next_mmu_context = FIRST_CONTEXT;
213	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
214}
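/*
 * Illustrative sketch of how a context could be handed out from the map
 * initialised above; the in-tree allocator lives in asm/mmu_context.h
 * and may differ in detail (alloc_context() is a made-up name):
 *
 *	static mm_context_t alloc_context(struct mm_struct *mm)
 *	{
 *		mm_context_t ctx;
 *
 *		if (!atomic_read(&nr_free_contexts))
 *			steal_context();	// recycle one in round-robin order
 *		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1,
 *					 next_mmu_context);
 *		if (ctx > LAST_CONTEXT)
 *			ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1,
 *						 FIRST_CONTEXT);
 *		set_bit(ctx, context_map);
 *		atomic_dec(&nr_free_contexts);
 *		next_mmu_context = (ctx < LAST_CONTEXT) ? ctx + 1 : FIRST_CONTEXT;
 *		context_mm[ctx] = mm;
 *		mm->context = ctx;
 *		return ctx;
 *	}
 */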
215
216/*
217 * Steal a context from a task that has one at the moment.
218 * This isn't an LRU system, it just frees up each context in
219 * turn (sort-of pseudo-random replacement :).  This would be the
220 * place to implement an LRU scheme if anyone was motivated to do it.
221 *  -- paulus
222 */
223void steal_context(void)
224{
225	struct mm_struct *mm;
226	/*
227	 * free up context `next_mmu_context'
228	 * if we shouldn't free context 0, don't...
229	 */
230	if (next_mmu_context < FIRST_CONTEXT)
231		next_mmu_context = FIRST_CONTEXT;
232	mm = context_mm[next_mmu_context];
233	flush_tlb_mm(mm);
234	destroy_context(mm);
235}
236
237static const pgprot_t protection_map[16] = {
238	[VM_NONE]					= PAGE_NONE,
239	[VM_READ]					= __pgprot(CF_PAGE_VALID |
240								   CF_PAGE_ACCESSED |
241								   CF_PAGE_READABLE),
242	[VM_WRITE]					= __pgprot(CF_PAGE_VALID |
243								   CF_PAGE_ACCESSED |
244								   CF_PAGE_WRITABLE),
245	[VM_WRITE | VM_READ]				= __pgprot(CF_PAGE_VALID |
246								   CF_PAGE_ACCESSED |
247								   CF_PAGE_READABLE |
248								   CF_PAGE_WRITABLE),
249	[VM_EXEC]					= __pgprot(CF_PAGE_VALID |
250								   CF_PAGE_ACCESSED |
251								   CF_PAGE_EXEC),
252	[VM_EXEC | VM_READ]				= __pgprot(CF_PAGE_VALID |
253								   CF_PAGE_ACCESSED |
254								   CF_PAGE_READABLE |
255								   CF_PAGE_EXEC),
256	[VM_EXEC | VM_WRITE]				= __pgprot(CF_PAGE_VALID |
257								   CF_PAGE_ACCESSED |
258								   CF_PAGE_WRITABLE |
259								   CF_PAGE_EXEC),
260	[VM_EXEC | VM_WRITE | VM_READ]			=  __pgprot(CF_PAGE_VALID |
261								    CF_PAGE_ACCESSED |
262								    CF_PAGE_READABLE |
263								    CF_PAGE_WRITABLE |
264								    CF_PAGE_EXEC),
265	[VM_SHARED]					= PAGE_NONE,
266	[VM_SHARED | VM_READ]				= __pgprot(CF_PAGE_VALID |
267								   CF_PAGE_ACCESSED |
268								   CF_PAGE_READABLE),
269	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
270	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
271								   CF_PAGE_ACCESSED |
272								   CF_PAGE_READABLE |
273								   CF_PAGE_SHARED),
274	[VM_SHARED | VM_EXEC]				= __pgprot(CF_PAGE_VALID |
275								   CF_PAGE_ACCESSED |
276								   CF_PAGE_EXEC),
277	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
278								   CF_PAGE_ACCESSED |
279								   CF_PAGE_READABLE |
280								   CF_PAGE_EXEC),
281	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(CF_PAGE_VALID |
282								   CF_PAGE_ACCESSED |
283								   CF_PAGE_SHARED |
284								   CF_PAGE_EXEC),
285	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
286								   CF_PAGE_ACCESSED |
287								   CF_PAGE_READABLE |
288								   CF_PAGE_SHARED |
289								   CF_PAGE_EXEC)
290};
291DECLARE_VM_GET_PAGE_PROT
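/*
 * protection_map[] translates the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * combinations into ColdFire page attribute bits, and
 * DECLARE_VM_GET_PAGE_PROT (from <linux/pgtable.h>) emits the generic
 * vm_get_page_prot() that simply indexes this table, roughly:
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 *
 * so, for example, vm_get_page_prot(VM_READ | VM_WRITE) yields
 * CF_PAGE_VALID | CF_PAGE_ACCESSED | CF_PAGE_READABLE | CF_PAGE_WRITABLE.
 */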
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Based upon linux/arch/m68k/mm/sun3mmu.c
  4 * Based upon linux/arch/ppc/mm/mmu_context.c
  5 *
  6 * Implementations of mm routines specific to the Coldfire MMU.
  7 *
  8 * Copyright (c) 2008 Freescale Semiconductor, Inc.
  9 */
 10
 11#include <linux/kernel.h>
 12#include <linux/types.h>
 13#include <linux/mm.h>
 14#include <linux/init.h>
 15#include <linux/string.h>
 16#include <linux/memblock.h>
 17
 18#include <asm/setup.h>
 19#include <asm/page.h>
 20#include <asm/pgtable.h>
 21#include <asm/mmu_context.h>
 22#include <asm/mcf_pgalloc.h>
 23#include <asm/tlbflush.h>
 24
 25#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
 26
 27mm_context_t next_mmu_context;
 28unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 29atomic_t nr_free_contexts;
 30struct mm_struct *context_mm[LAST_CONTEXT+1];
 31unsigned long num_pages;
 32
 33/*
 34 * ColdFire paging_init derived from sun3.
 35 */
 36void __init paging_init(void)
 37{
 38	pgd_t *pg_dir;
 39	pte_t *pg_table;
 40	unsigned long address, size;
 41	unsigned long next_pgtable, bootmem_end;
 42	unsigned long zones_size[MAX_NR_ZONES];
 43	enum zone_type zone;
 44	int i;
 45
 46	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 47	if (!empty_zero_page)
 48		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 49		      __func__, PAGE_SIZE, PAGE_SIZE);
 50
 51	pg_dir = swapper_pg_dir;
 52	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 53
 54	size = num_pages * sizeof(pte_t);
 55	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 56	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
 57	if (!next_pgtable)
 58		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 59		      __func__, size, PAGE_SIZE);
 60
 61	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 62	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
 63
 64	address = PAGE_OFFSET;
 65	while (address < (unsigned long)high_memory) {
 66		pg_table = (pte_t *) next_pgtable;
 67		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
 68		pgd_val(*pg_dir) = (unsigned long) pg_table;
 69		pg_dir++;
 70
 71		/* now change pg_table to kernel virtual addresses */
 72		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
 73			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
 74			if (address >= (unsigned long) high_memory)
 75				pte_val(pte) = 0;
 76
 77			set_pte(pg_table, pte);
 78			address += PAGE_SIZE;
 79		}
 80	}
 81
 82	current->mm = NULL;
 83
 84	for (zone = 0; zone < MAX_NR_ZONES; zone++)
 85		zones_size[zone] = 0x0;
 86	zones_size[ZONE_DMA] = num_pages;
 87	free_area_init(zones_size);
 88}
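/*
 * In this v5.4 version free_area_init() still takes an array of per-zone
 * page counts (zones_size); the v6.2 listing above passes maximum zone
 * PFNs (max_zone_pfn) instead, reflecting the later rework of the
 * free_area_init() interface.
 */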
 89
 90int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 91{
 92	unsigned long flags, mmuar, mmutr;
 93	struct mm_struct *mm;
 94	pgd_t *pgd;
 95	pmd_t *pmd;
 96	pte_t *pte;
 97	int asid;
 98
 99	local_irq_save(flags);
100
101	mmuar = (dtlb) ? mmu_read(MMUAR) :
102		regs->pc + (extension_word * sizeof(long));
103
104	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
105	if (!mm) {
106		local_irq_restore(flags);
107		return -1;
108	}
109
110	pgd = pgd_offset(mm, mmuar);
111	if (pgd_none(*pgd))  {
112		local_irq_restore(flags);
113		return -1;
114	}
115
116	pmd = pmd_offset(pgd, mmuar);
117	if (pmd_none(*pmd)) {
118		local_irq_restore(flags);
119		return -1;
120	}
121
122	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
123				: pte_offset_map(pmd, mmuar);
124	if (pte_none(*pte) || !pte_present(*pte)) {
125		local_irq_restore(flags);
126		return -1;
127	}
128
129	if (write) {
130		if (!pte_write(*pte)) {
131			local_irq_restore(flags);
132			return -1;
133		}
134		set_pte(pte, pte_mkdirty(*pte));
135	}
136
137	set_pte(pte, pte_mkyoung(*pte));
138	asid = mm->context & 0xff;
139	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
140		set_pte(pte, pte_wrprotect(*pte));
141
142	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
143	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
144		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
145	mmu_write(MMUTR, mmutr);
146
147	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
148		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
149
150	if (dtlb)
151		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
152	else
153		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
154
155	local_irq_restore(flags);
156	return 0;
157}
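/*
 * Note: this v5.4 variant walks pgd -> pmd -> pte directly; the v6.2
 * listing above adds the folded p4d and pud levels after m68k moved to
 * the generic folded page-table helpers.
 */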
158
159void __init cf_bootmem_alloc(void)
160{
161	unsigned long memstart;
162
163	/* _rambase and _ramend will be naturally page aligned */
164	m68k_memory[0].addr = _rambase;
165	m68k_memory[0].size = _ramend - _rambase;
166
167	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
168
169	/* compute total pages in system */
170	num_pages = PFN_DOWN(_ramend - _rambase);
171
172	/* page numbers */
173	memstart = PAGE_ALIGN(_ramstart);
174	min_low_pfn = PFN_DOWN(_rambase);
175	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
176	high_memory = (void *)_ramend;
177
178	/* Reserve kernel text/data/bss */
179	memblock_reserve(_rambase, memstart - _rambase);
180
181	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
182	module_fixup(NULL, __start_fixup, __stop_fixup);
183
184	/* setup node data */
185	m68k_setup_node(0);
186}
187
188/*
189 * Initialize the context management stuff.
190 * The following was taken from arch/ppc/mmu_context.c
191 */
192void __init cf_mmu_context_init(void)
193{
194	/*
195	 * Some processors have too few contexts to reserve one for
196	 * init_mm, and require using context 0 for a normal task.
197	 * Other processors reserve the use of context zero for the kernel.
198	 * This code assumes FIRST_CONTEXT < 32.
199	 */
200	context_map[0] = (1 << FIRST_CONTEXT) - 1;
201	next_mmu_context = FIRST_CONTEXT;
202	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
203}
204
205/*
206 * Steal a context from a task that has one at the moment.
207 * This is only used on 8xx and 4xx and we presently assume that
 208 * they don't do SMP.  If they do then this will have to check
209 * whether the MM we steal is in use.
210 * We also assume that this is only used on systems that don't
211 * use an MMU hash table - this is true for 8xx and 4xx.
212 * This isn't an LRU system, it just frees up each context in
213 * turn (sort-of pseudo-random replacement :).  This would be the
214 * place to implement an LRU scheme if anyone was motivated to do it.
215 *  -- paulus
216 */
217void steal_context(void)
218{
219	struct mm_struct *mm;
220	/*
221	 * free up context `next_mmu_context'
222	 * if we shouldn't free context 0, don't...
223	 */
224	if (next_mmu_context < FIRST_CONTEXT)
225		next_mmu_context = FIRST_CONTEXT;
226	mm = context_mm[next_mmu_context];
227	flush_tlb_mm(mm);
228	destroy_context(mm);
229}
230