Linux kernel source: arch/mips/mm/tlb-r8k.c — MIPS R8000 (TFP) TLB handling.
Two revisions of the file are shown below for comparison; the first is from kernel v3.1.

v3.1
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/init.h>
 12#include <linux/sched.h>
 13#include <linux/smp.h>
 14#include <linux/mm.h>
 15
 16#include <asm/cpu.h>
 17#include <asm/bootinfo.h>
 18#include <asm/mmu_context.h>
 19#include <asm/pgtable.h>
 20#include <asm/system.h>
 21
 22extern void build_tlb_refill_handler(void);
 23
 24#define TFP_TLB_SIZE		384
 25#define TFP_TLB_SET_SHIFT	7
 26
 27/* CP0 hazard avoidance. */
 28#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
 29				     "nop; nop; nop; nop; nop; nop;\n\t" \
 30				     ".set reorder\n\t")
 31
 32void local_flush_tlb_all(void)
 33{
	/*
	 * Flush the entire TLB: walk all TFP_TLB_SIZE entries and overwrite
	 * each one with a CKSEG0 (unmapped-segment) VPN2 so no entry can
	 * match a translated address.  Runs with interrupts off because the
	 * CP0 staging registers (EntryHi/EntryLo/TLBSet/Vaddr) are global.
	 */
 34	unsigned long flags;
 35	unsigned long old_ctx;
 36	int entry;
 37
 38	local_irq_save(flags);
 39	/* Save old context and create impossible VPN2 value */
 40	old_ctx = read_c0_entryhi();
 41	write_c0_entrylo(0);	/* zeroed EntryLo: entries map nothing */
 42
 43	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		/* Select the set from the upper index bits, the line below. */
 44		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
 45		write_c0_vaddr(entry << PAGE_SHIFT);
		/* Distinct CKSEG0 VPN2 per entry so no two entries collide. */
 46		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
 47		mtc0_tlbw_hazard();	/* settle CP0 writes before tlbw */
 48		tlb_write();
 49	}
 50	tlbw_use_hazard();
 51	write_c0_entryhi(old_ctx);	/* restore caller's ASID/context */
 52	local_irq_restore(flags);
 53}
 54
 55void local_flush_tlb_mm(struct mm_struct *mm)
 56{
 57	int cpu = smp_processor_id();
 58
 59	if (cpu_context(cpu, mm) != 0)
 60		drop_mmu_context(mm, cpu);
 61}
 62
 63void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 64	unsigned long end)
 65{
	/*
	 * Invalidate the TLB entries covering [start, end) of vma's address
	 * space on this CPU.  For ranges spanning more than half the TLB it
	 * is cheaper to drop the whole MMU context than to probe per page.
	 */
 66	struct mm_struct *mm = vma->vm_mm;
 67	int cpu = smp_processor_id();
 68	unsigned long flags;
 69	int oldpid, newpid, size;
 70
 71	if (!cpu_context(cpu, mm))
 72		return;
 73
	/* Page count, halved — entries presumably map page pairs (the
	 * PAGE_SHIFT + 1 scaling below suggests so) — TODO confirm. */
 74	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 75	size = (size + 1) >> 1;
 76
 77	local_irq_save(flags);
 78
 79	if (size > TFP_TLB_SIZE / 2) {
 80		drop_mmu_context(mm, cpu);
 81		goto out_restore;
 82	}
 83
 84	oldpid = read_c0_entryhi();
 85	newpid = cpu_asid(cpu, mm);
	/*
	 * NOTE(review): newpid is computed but never written into EntryHi;
	 * the probe below loads 'start' (page-aligned, so ASID bits zero)
	 * instead.  Compare local_flush_tlb_page(), which does probe with
	 * the ASID — verify against the TFP tlbp semantics.
	 */
 86
 87	write_c0_entrylo(0);
 88
 89	start &= PAGE_MASK;
 90	end += (PAGE_SIZE - 1);
 91	end &= PAGE_MASK;
 92	while (start < end) {
 93		signed long idx;
 94
 95		write_c0_vaddr(start);
 96		write_c0_entryhi(start);
 97		start += PAGE_SIZE;
 98		tlb_probe();
		/* A negative set index is treated as "no match": skip. */
 99		idx = read_c0_tlbset();
100		if (idx < 0)
101			continue;
102
		/* Overwrite the matching entry with an unmapped CKSEG0 VPN2. */
103		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
104		tlb_write();
105	}
106	write_c0_entryhi(oldpid);
107
108out_restore:
109	local_irq_restore(flags);
110}
111
112/* Usable for KV1 addresses only! */
113void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
114{
	/*
	 * Invalidate kernel-range translations [start, end).  Falls back to
	 * a full flush when the range spans more than half the TLB.
	 */
115	unsigned long size, flags;
116
	/* Page count, halved (same pairing convention as the range flush). */
117	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
118	size = (size + 1) >> 1;
119
120	if (size > TFP_TLB_SIZE / 2) {
121		local_flush_tlb_all();
122		return;
123	}
124
125	local_irq_save(flags);
126
127	write_c0_entrylo(0);
128
129	start &= PAGE_MASK;
130	end += (PAGE_SIZE - 1);
131	end &= PAGE_MASK;
132	while (start < end) {
133		signed long idx;
134
135		write_c0_vaddr(start);
136		write_c0_entryhi(start);
137		start += PAGE_SIZE;
138		tlb_probe();
		/* Negative set index: no TLB entry for this page — skip. */
139		idx = read_c0_tlbset();
140		if (idx < 0)
141			continue;
142
		/* Replace the matching entry with an unmapped CKSEG0 VPN2. */
143		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
144		tlb_write();
145	}
146
147	local_irq_restore(flags);
148}
149
150void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
151{
	/*
	 * Invalidate the single TLB entry mapping @page in vma's mm on this
	 * CPU, if that mm has a live context here.
	 */
152	int cpu = smp_processor_id();
153	unsigned long flags;
154	int oldpid, newpid;
155	signed long idx;
156
157	if (!cpu_context(cpu, vma->vm_mm))
158		return;
159
160	newpid = cpu_asid(cpu, vma->vm_mm);
161	page &= PAGE_MASK;
162	local_irq_save(flags);
163	oldpid = read_c0_entryhi();
	/* Probe with the mm's ASID in EntryHi and the page in Vaddr. */
164	write_c0_vaddr(page);
165	write_c0_entryhi(newpid);
166	tlb_probe();
167	idx = read_c0_tlbset();
168	if (idx < 0)
		/* Negative set index: no matching entry, nothing to do. */
169		goto finish;
170
	/* Kill the hit: zero EntryLo plus an unmapped CKSEG0 VPN2. */
171	write_c0_entrylo(0);
172	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
173	tlb_write();
174
175finish:
176	write_c0_entryhi(oldpid);
177	local_irq_restore(flags);
178}
179
180/*
181 * We will need multiple versions of update_mmu_cache(), one that just
182 * updates the TLB with the new pte(s), and another which also checks
183 * for the R4k "end of page" hardware bug and does the needy.
184 */
185void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
186{
	/*
	 * Refresh the TLB entry for @address after a PTE change: probe for
	 * the current entry, then rewrite it from the page tables.  The
	 * 'pte' argument is unused — the value is re-read via the
	 * pgd/pmd/pte walk below.
	 */
187	unsigned long flags;
188	pgd_t *pgdp;
189	pmd_t *pmdp;
190	pte_t *ptep;
191	int pid;
192
193	/*
194	 * Handle debugger faulting in for debugee.
195	 */
196	if (current->active_mm != vma->vm_mm)
197		return;
198
199	pid = read_c0_entryhi() & ASID_MASK;
200
201	local_irq_save(flags);
202	address &= PAGE_MASK;
203	write_c0_vaddr(address);
204	write_c0_entryhi(pid);
205	pgdp = pgd_offset(vma->vm_mm, address);
206	pmdp = pmd_offset(pgdp, address);
207	ptep = pte_offset_map(pmdp, address);
208	tlb_probe();
209
	/* The >> 6 presumably aligns the PTE's PFN with the EntryLo layout
	 * — TODO confirm against TFP docs.  The ptep++ result is unused. */
210	write_c0_entrylo(pte_val(*ptep++) >> 6);
211	tlb_write();
212
213	write_c0_entryhi(pid);
214	local_irq_restore(flags);
215}
216
217static void __cpuinit probe_tlb(unsigned long config)
218{
219	struct cpuinfo_mips *c = &current_cpu_data;
220
221	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
222}
223
224void __cpuinit tlb_init(void)
225{
	/*
	 * Boot-time TLB bring-up: record the TLB size, program the user and
	 * kernel page-size fields in c0_status, clear the wired count,
	 * flush every entry, then install the TLB refill handler.
	 */
226	unsigned int config = read_c0_config();
227	unsigned long status;
228
229	probe_tlb(config);
230
	/*
	 * Clear the UPS/KPS page-size fields, then set both to the build's
	 * page size.  The << 32 / << 36 shifts require 64-bit longs (this
	 * would be UB on a 32-bit unsigned long).
	 */
231	status = read_c0_status();
232	status &= ~(ST0_UPS | ST0_KPS);
233#ifdef CONFIG_PAGE_SIZE_4KB
234	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
235#elif defined(CONFIG_PAGE_SIZE_8KB)
236	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
237#elif defined(CONFIG_PAGE_SIZE_16KB)
238	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
239#elif defined(CONFIG_PAGE_SIZE_64KB)
240	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
241#endif
242	write_c0_status(status);
243
244	write_c0_wired(0);	/* no wired (pinned) entries */
245
246	local_flush_tlb_all();
247
248	build_tlb_refill_handler();
249}
v4.6 — the same file as of kernel v4.6 (differs only in the header includes and the removal of the __cpuinit annotations)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 
 11#include <linux/sched.h>
 12#include <linux/smp.h>
 13#include <linux/mm.h>
 14
 15#include <asm/cpu.h>
 16#include <asm/bootinfo.h>
 17#include <asm/mmu_context.h>
 18#include <asm/pgtable.h>
 
 19
 20extern void build_tlb_refill_handler(void);
 21
 22#define TFP_TLB_SIZE		384
 23#define TFP_TLB_SET_SHIFT	7
 24
 25/* CP0 hazard avoidance. */
 26#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
 27				     "nop; nop; nop; nop; nop; nop;\n\t" \
 28				     ".set reorder\n\t")
 29
 30void local_flush_tlb_all(void)
 31{
	/*
	 * Flush the entire TLB: overwrite all TFP_TLB_SIZE entries with
	 * CKSEG0 (unmapped-segment) VPN2 values so none can match a
	 * translated address.  Interrupts are off while the global CP0
	 * staging registers are in use.
	 */
 32	unsigned long flags;
 33	unsigned long old_ctx;
 34	int entry;
 35
 36	local_irq_save(flags);
 37	/* Save old context and create impossible VPN2 value */
 38	old_ctx = read_c0_entryhi();
 39	write_c0_entrylo(0);	/* zeroed EntryLo: entries map nothing */
 40
 41	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		/* Set from the upper index bits, line from the lower. */
 42		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
 43		write_c0_vaddr(entry << PAGE_SHIFT);
		/* Distinct CKSEG0 VPN2 per entry so entries never collide. */
 44		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
 45		mtc0_tlbw_hazard();	/* settle CP0 writes before tlbw */
 46		tlb_write();
 47	}
 48	tlbw_use_hazard();
 49	write_c0_entryhi(old_ctx);	/* restore caller's ASID/context */
 50	local_irq_restore(flags);
 51}
 52
 53void local_flush_tlb_mm(struct mm_struct *mm)
 54{
 55	int cpu = smp_processor_id();
 56
 57	if (cpu_context(cpu, mm) != 0)
 58		drop_mmu_context(mm, cpu);
 59}
 60
 61void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 62	unsigned long end)
 63{
	/*
	 * Invalidate the TLB entries covering [start, end) of vma's address
	 * space on this CPU; drop the whole MMU context instead when the
	 * range spans more than half the TLB.
	 */
 64	struct mm_struct *mm = vma->vm_mm;
 65	int cpu = smp_processor_id();
 66	unsigned long flags;
 67	int oldpid, newpid, size;
 68
 69	if (!cpu_context(cpu, mm))
 70		return;
 71
	/* Page count, halved — entries presumably map page pairs (the
	 * PAGE_SHIFT + 1 scaling below suggests so) — TODO confirm. */
 72	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 73	size = (size + 1) >> 1;
 74
 75	local_irq_save(flags);
 76
 77	if (size > TFP_TLB_SIZE / 2) {
 78		drop_mmu_context(mm, cpu);
 79		goto out_restore;
 80	}
 81
 82	oldpid = read_c0_entryhi();
 83	newpid = cpu_asid(cpu, mm);
	/*
	 * NOTE(review): newpid is never written into EntryHi; the probe
	 * below loads 'start' (page-aligned, ASID bits zero) instead.
	 * Compare local_flush_tlb_page(), which probes with the ASID —
	 * verify against the TFP tlbp semantics.
	 */
 84
 85	write_c0_entrylo(0);
 86
 87	start &= PAGE_MASK;
 88	end += (PAGE_SIZE - 1);
 89	end &= PAGE_MASK;
 90	while (start < end) {
 91		signed long idx;
 92
 93		write_c0_vaddr(start);
 94		write_c0_entryhi(start);
 95		start += PAGE_SIZE;
 96		tlb_probe();
		/* A negative set index is treated as "no match": skip. */
 97		idx = read_c0_tlbset();
 98		if (idx < 0)
 99			continue;
100
		/* Overwrite the matching entry with an unmapped CKSEG0 VPN2. */
101		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
102		tlb_write();
103	}
104	write_c0_entryhi(oldpid);
105
106out_restore:
107	local_irq_restore(flags);
108}
109
110/* Usable for KV1 addresses only! */
111void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
112{
	/*
	 * Invalidate kernel-range translations [start, end); fall back to a
	 * full flush when the range spans more than half the TLB.
	 */
113	unsigned long size, flags;
114
	/* Page count, halved (same pairing convention as the range flush). */
115	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
116	size = (size + 1) >> 1;
117
118	if (size > TFP_TLB_SIZE / 2) {
119		local_flush_tlb_all();
120		return;
121	}
122
123	local_irq_save(flags);
124
125	write_c0_entrylo(0);
126
127	start &= PAGE_MASK;
128	end += (PAGE_SIZE - 1);
129	end &= PAGE_MASK;
130	while (start < end) {
131		signed long idx;
132
133		write_c0_vaddr(start);
134		write_c0_entryhi(start);
135		start += PAGE_SIZE;
136		tlb_probe();
		/* Negative set index: no TLB entry for this page — skip. */
137		idx = read_c0_tlbset();
138		if (idx < 0)
139			continue;
140
		/* Replace the matching entry with an unmapped CKSEG0 VPN2. */
141		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
142		tlb_write();
143	}
144
145	local_irq_restore(flags);
146}
147
148void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
149{
	/*
	 * Invalidate the single TLB entry mapping @page in vma's mm on this
	 * CPU, if that mm has a live context here.
	 */
150	int cpu = smp_processor_id();
151	unsigned long flags;
152	int oldpid, newpid;
153	signed long idx;
154
155	if (!cpu_context(cpu, vma->vm_mm))
156		return;
157
158	newpid = cpu_asid(cpu, vma->vm_mm);
159	page &= PAGE_MASK;
160	local_irq_save(flags);
161	oldpid = read_c0_entryhi();
	/* Probe with the mm's ASID in EntryHi and the page in Vaddr. */
162	write_c0_vaddr(page);
163	write_c0_entryhi(newpid);
164	tlb_probe();
165	idx = read_c0_tlbset();
166	if (idx < 0)
		/* Negative set index: no matching entry, nothing to do. */
167		goto finish;
168
	/* Kill the hit: zero EntryLo plus an unmapped CKSEG0 VPN2. */
169	write_c0_entrylo(0);
170	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
171	tlb_write();
172
173finish:
174	write_c0_entryhi(oldpid);
175	local_irq_restore(flags);
176}
177
178/*
179 * We will need multiple versions of update_mmu_cache(), one that just
180 * updates the TLB with the new pte(s), and another which also checks
181 * for the R4k "end of page" hardware bug and does the needy.
182 */
183void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
184{
	/*
	 * Refresh the TLB entry for @address after a PTE change: probe for
	 * the current entry, then rewrite it from the page tables.  The
	 * 'pte' argument is unused — the value is re-read via the
	 * pgd/pmd/pte walk below.
	 */
185	unsigned long flags;
186	pgd_t *pgdp;
187	pmd_t *pmdp;
188	pte_t *ptep;
189	int pid;
190
191	/*
192	 * Handle debugger faulting in for debugee.
193	 */
194	if (current->active_mm != vma->vm_mm)
195		return;
196
197	pid = read_c0_entryhi() & ASID_MASK;
198
199	local_irq_save(flags);
200	address &= PAGE_MASK;
201	write_c0_vaddr(address);
202	write_c0_entryhi(pid);
203	pgdp = pgd_offset(vma->vm_mm, address);
204	pmdp = pmd_offset(pgdp, address);
205	ptep = pte_offset_map(pmdp, address);
206	tlb_probe();
207
	/* The >> 6 presumably aligns the PTE's PFN with the EntryLo layout
	 * — TODO confirm against TFP docs.  The ptep++ result is unused. */
208	write_c0_entrylo(pte_val(*ptep++) >> 6);
209	tlb_write();
210
211	write_c0_entryhi(pid);
212	local_irq_restore(flags);
213}
214
215static void probe_tlb(unsigned long config)
216{
217	struct cpuinfo_mips *c = &current_cpu_data;
218
219	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
220}
221
222void tlb_init(void)
223{
	/*
	 * Boot-time TLB bring-up: record the TLB size, program the user and
	 * kernel page-size fields in c0_status, clear the wired count,
	 * flush every entry, then install the TLB refill handler.
	 */
224	unsigned int config = read_c0_config();
225	unsigned long status;
226
227	probe_tlb(config);
228
	/*
	 * Clear the UPS/KPS page-size fields, then set both to the build's
	 * page size.  The << 32 / << 36 shifts require 64-bit longs (this
	 * would be UB on a 32-bit unsigned long).
	 */
229	status = read_c0_status();
230	status &= ~(ST0_UPS | ST0_KPS);
231#ifdef CONFIG_PAGE_SIZE_4KB
232	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
233#elif defined(CONFIG_PAGE_SIZE_8KB)
234	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
235#elif defined(CONFIG_PAGE_SIZE_16KB)
236	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
237#elif defined(CONFIG_PAGE_SIZE_64KB)
238	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
239#endif
240	write_c0_status(status);
241
242	write_c0_wired(0);	/* no wired (pinned) entries */
243
244	local_flush_tlb_all();
245
246	build_tlb_refill_handler();
247}