// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

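/*
 * True for addresses inside the kernel vmalloc/kmap window, which are
 * mapped through init_mm rather than a user process' mm.
 */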
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))

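/*
 * ASID (MMU context) bookkeeping, modelled on the old
 * arch/ppc mmu_context code.
 */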
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
        pgd_t *pg_dir;
        pte_t *pg_table;
        unsigned long address, size;
        unsigned long next_pgtable, bootmem_end;
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
        int i;

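        /* the shared zero page; memblock_alloc() returns zeroed memory */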
        empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        pg_dir = swapper_pg_dir;
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

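        /*
         * Allocate one contiguous block of pte tables, rounded up to a
         * whole number of pages, big enough to map all of RAM.
         */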
        size = num_pages * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE - 1);
        next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
        if (!next_pgtable)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, size, PAGE_SIZE);

        bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
        pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

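        /*
         * Build the kernel mapping: hand out one pte table per pgd slot
         * until everything up to high_memory is covered.
         */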
        address = PAGE_OFFSET;
        while (address < (unsigned long)high_memory) {
                pg_table = (pte_t *) next_pgtable;
                next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
                pgd_val(*pg_dir) = (unsigned long) pg_table;
                pg_dir++;

                /* now fill pg_table with ptes for the kernel virtual addresses */
                for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
                        pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
                        if (address >= (unsigned long) high_memory)
                                pte_val(pte) = 0;

                        set_pte(pg_table, pte);
                        address += PAGE_SIZE;
                }
        }

        current->mm = NULL;
        max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
        free_area_init(max_zone_pfn);
}

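/*
 * Handle a TLB miss: walk the page tables for the faulting address
 * and, if a valid mapping exists, load it into the hardware TLB.
 * @write is set for a store fault, @dtlb selects the data (rather
 * than instruction) TLB, and @extension_word adjusts the PC when the
 * faulting instruction address must be computed. Returns 0 once the
 * entry has been loaded, or -1 if there is no valid mapping, leaving
 * the fault to be resolved elsewhere.
 */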
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
        unsigned long flags, mmuar, mmutr;
        struct mm_struct *mm;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int asid;

        local_irq_save(flags);

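        /*
         * A data fault latches the faulting address in the MMUAR
         * register; for an instruction fault it is computed from the
         * PC and the extension word.
         */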
        mmuar = (dtlb) ? mmu_read(MMUAR) :
                regs->pc + (extension_word * sizeof(long));

        mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
        if (!mm) {
                local_irq_restore(flags);
                return -1;
        }

        pgd = pgd_offset(mm, mmuar);
        if (pgd_none(*pgd)) {
                local_irq_restore(flags);
                return -1;
        }

        p4d = p4d_offset(pgd, mmuar);
        if (p4d_none(*p4d)) {
                local_irq_restore(flags);
                return -1;
        }

        pud = pud_offset(p4d, mmuar);
        if (pud_none(*pud)) {
                local_irq_restore(flags);
                return -1;
        }

        pmd = pmd_offset(pud, mmuar);
        if (pmd_none(*pmd)) {
                local_irq_restore(flags);
                return -1;
        }

        pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
                                : pte_offset_map(pmd, mmuar);
        if (pte_none(*pte) || !pte_present(*pte)) {
                local_irq_restore(flags);
                return -1;
        }

        if (write) {
                if (!pte_write(*pte)) {
                        local_irq_restore(flags);
                        return -1;
                }
                set_pte(pte, pte_mkdirty(*pte));
        }

        set_pte(pte, pte_mkyoung(*pte));
        asid = mm->context & 0xff;
        if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
                set_pte(pte, pte_wrprotect(*pte));

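        /*
         * Program the TLB entry: virtual page, ASID and valid bit go
         * into MMUTR, the physical page and attribute bits into MMUDR,
         * and the MMUOR write commits the entry.
         */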
        mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
        if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
                mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
        mmu_write(MMUTR, mmutr);

        mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
                ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

        if (dtlb)
                mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
        else
                mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

        local_irq_restore(flags);
        return 0;
}

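/*
 * Register the platform RAM (_rambase.._ramend) with memblock, reserve
 * the pages already occupied by the kernel image and early allocations
 * (everything below _ramstart), and set up the single memory node.
 */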
void __init cf_bootmem_alloc(void)
{
        unsigned long memstart;

        /* _rambase and _ramend will be naturally page aligned */
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;

        memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);

        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);

        /* establish the page-frame-number bounds of RAM */
        memstart = PAGE_ALIGN(_ramstart);
        min_low_pfn = PFN_DOWN(_rambase);
        max_pfn = max_low_pfn = PFN_DOWN(_ramend);
        high_memory = (void *)_ramend;

        /* Reserve kernel text/data/bss */
        memblock_reserve(_rambase, memstart - _rambase);

        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);

        /* setup node data */
        m68k_setup_node(0);
}

/*
 * Initialize the MMU context (ASID) management.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
        /*
         * Some processors have too few contexts to reserve one for
         * init_mm, and require using context 0 for a normal task.
         * Other processors reserve the use of context zero for the kernel.
         * This code assumes FIRST_CONTEXT < 32.
         */
        context_map[0] = (1 << FIRST_CONTEXT) - 1;
        next_mmu_context = FIRST_CONTEXT;
        atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 * -- paulus
 */
void steal_context(void)
{
        struct mm_struct *mm;
        /*
         * free up context `next_mmu_context'
         * if we shouldn't free context 0, don't...
         */
        if (next_mmu_context < FIRST_CONTEXT)
                next_mmu_context = FIRST_CONTEXT;
        mm = context_mm[next_mmu_context];
        flush_tlb_mm(mm);
        destroy_context(mm);
}