Note: File does not exist in v6.8.
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>

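/*
 * Each segment table entry (STE) is two doublewords: esid_data holds
 * the ESID plus the valid (V) and protection (Kp/Ks) bits, vsid_data
 * holds the VSID, as implied by the STE_* masks used below.
 */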
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
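/*
 * stab_cache[] records the global entry numbers of the user STEs
 * created on this CPU since the last context switch, and
 * stab_cache_ptr counts them; switch_stab() uses the cache to
 * invalidate just those entries rather than scanning the whole table.
 */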

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

	/* Search the primary group first. */
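	/*
	 * Each group holds eight 16-byte STEs (128 bytes).  The low five
	 * bits of the ESID select one of the 32 primary groups: shifted
	 * left by 7 they give the byte offset into the page-aligned
	 * table, shifted left by 3 the global entry number.
	 */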
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				eieio();
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry, so pick one by round-robin
	 * selection, searching all 16 entries in the two groups.
	 * stab_rr values 0-7 index the primary group, 8-15 the secondary.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first (bolted) kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");    /* Order update */

	castout_ste->vsid_data = vsid_data;
	eieio();				/* Order update */
	castout_ste->esid_data = esid_data;

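	/*
	 * The hardware may still hold a cached translation for the
	 * evicted ESID; slbie flushes it before the entry is reused.
	 */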
	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
	}

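	/* Install the entry in this CPU's segment table (via the PACA). */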
	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
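		/*
		 * A value past NR_STAB_CACHE_ENTRIES marks the cache as
		 * overflowed, so switch_stab() falls back to scanning the
		 * whole table instead of replaying the cache.
		 */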
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause a STAB miss
	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
	 */
	hard_irq_disable();

	offset = __get_cpu_var(stab_cache_ptr);
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry: the bolted kernel segment. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

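	/*
	 * slbia drops whatever segment translations the hardware has
	 * cached, with the syncs ordering it against the STE stores
	 * above and the reloads below.
	 */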
	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

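	/*
	 * Preload the segments for the PC, the stack, and the mmap base,
	 * skipping duplicates: segments are 256MB, so two of these
	 * addresses often share one entry.
	 */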
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	if (mmu_has_feature(MMU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

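		/*
		 * 1 << SID_SHIFT is 256MB: allocating below it keeps the
		 * table inside the first (bolted) kernel segment.
		 */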
		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					 1<<SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = virt_to_abs(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
		       "virtual, 0x%llx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
	unsigned long stabreal;

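	/* Flush any stale cached segment translations before we start. */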
	asm volatile("isync; slbia; isync":::"memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync":::"memory");

	/*
	 * Point the ASR (Address Space Register) at the segment table:
	 * the real address of the table, with the low bit set to mark
	 * it valid.
	 */
	stabreal = get_paca()->stab_real | 0x1ul;

	mtspr(SPRN_ASR, stabreal);
}