/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 *
 */

#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/da.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>

/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV	(TXPRIVEXT_COPRO_BITS		| \
			 TXPRIVEXT_TXTRIGGER_BIT	| \
			 TXPRIVEXT_TXGBLCREG_BIT	| \
			 TXPRIVEXT_ILOCK_BIT		| \
			 TXPRIVEXT_TXITACCYC_BIT	| \
			 TXPRIVEXT_TXDIVTIME_BIT	| \
			 TXPRIVEXT_TXAMAREGX_BIT	| \
			 TXPRIVEXT_TXTIMERI_BIT		| \
			 TXPRIVEXT_TXSTATUS_BIT		| \
			 TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV	0
#else
#define META2_PRIV	(TXPRIVEXT_TXTIMER_BIT		| \
			 TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV	TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV	0
#endif

#define PRIV_BITS 	(DEFAULT_PRIV			| \
			 META2_PRIV			| \
			 UNALIGNED_PRIV)

/*
 * Protect access to:
 * 0x06000000-0x07ffffff Direct mapped region
 * 0x05000000-0x05ffffff MMU table region (Meta1)
 * 0x04400000-0x047fffff Cache flush region
 * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 *
 * Allow access to:
 * 0x80000000-0x81ffffff Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS	TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS	(TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS	0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS	0xfffffffb


extern char _heap_start[];

#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif

const struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID
 * In SMP this will be set up with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory.  This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);

static char __initdata *original_cmd_line;

DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 *	"hwthread_map=0:1,1:2,2:3,3:0"
 *
 *	Linux CPU ID	HWTHREAD_ID
 *	---------------------------
 *	    0		      1
 *	    1		      2
 *	    2		      3
 *	    3		      0
 */
static int __init parse_hwthread_map(char *p)
{
	int cpu;

	while (*p) {
		cpu = (*p++) - '0';
		if (cpu < 0 || cpu > 9)
			goto err_cpu;

		p++;		/* skip colon */
		cpu_2_hwthread_id[cpu] = (*p++) - '0';
		if (cpu_2_hwthread_id[cpu] >= 4)
			goto err_thread;
		hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

		if (*p == ',')
			p++;		/* skip comma */
	}

	return 0;
err_cpu:
	pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
	return -EINVAL;
err_thread:
	pr_err("%s: hwthread_map thread argument out of range\n", __func__);
	return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);

void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
	metag_panic_event,
	NULL,
	0
};
#endif

void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id, i;

	metag_cache_probe();

	metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
	if (metag_da_enabled()) {
		/* An early channel based console driver */
		register_console(&dash_console);
		add_preferred_console("ttyDA", 1, NULL);
	}
#endif

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}
	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);


#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at the PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);
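	/*
	 * With this offset, __pa(v) on a kernel address should reduce to
	 * v - meta_memoffset, and __va() to the inverse; see asm/page.h.
	 */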

	/* Now let's look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task.  It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded up pages marked reserved.
	 */
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);
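	/*
	 * For example, assuming 4 KiB pages and MAX_ORDER == 10, this rounds
	 * max_pfn up to the next 1024-page boundary, i.e. the "4Mb boundary"
	 * in the comment above; other configurations round to their own
	 * MAX_ORDER-sized boundary.
	 */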

	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'.  Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */

		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	setup_priv();

	/* Set up the boot cpu's mapping. The rest will be set up below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	unflatten_and_copy_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	if (machine_desc->init_early)
		machine_desc->init_early();
}

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_PROC_FS
/*
 *	Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

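	/*
	 * The two fields are packed into a single lookup key: DSP type in
	 * bits 0-2 and FPU type in bits 3-4, which the case values below
	 * decode.
	 */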
	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	}
	return "UNKNOWN";

#else
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	int i;
	unsigned long lpj;
#endif

	cpu = "META";

	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
							cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			      "Clocking:\t%lu.%1luMHz\n"
			      "BogoMips:\t%lu.%02lu\n"
			      "Calibration:\t%lu loops\n"
			      "Capabilities:\t%s\n\n",
			      cpu, major, minor, i,
			      clockfreq / 1000000, (clockfreq / 100000) % 10,
			      lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			      lpj,
			      get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		   "Clocking:\t%lu.%1luMHz\n"
		   "BogoMips:\t%lu.%02lu\n"
		   "Calibration:\t%lu loops\n"
		   "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			      "L2 cache size:\t%d KB\n",
			      meta_l2c_is_enabled() ? "enabled" : "disabled",
			      meta_l2c_size() >> 10);
	}
#endif
	return 0;
}

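/*
 * Single-record seq_file iterator: c_start() yields a non-NULL token only
 * for position 0 and c_next() always ends the walk, so show_cpuinfo() runs
 * exactly once per read.
 */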
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember where these are for use in setup_arch */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}

/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
	unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

	__core_reg_set(TXPRIVEXT, PRIV_BITS);

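	/*
	 * The memory-mapped privilege registers are banked per hardware
	 * thread at a fixed stride, so adding the offset to the thread-0
	 * addresses selects this thread's copies.
	 */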
	metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
	metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
	metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
}

PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);

#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
static char capabilities[] = "fpu";
#else
static char capabilities[] = "";
#endif

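/*
 * Expose the build-time capability string read-only through sysctl as
 * kernel.capabilities (/proc/sys/kernel/capabilities).
 */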
static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilities,
		.maxlen		= sizeof(capabilities),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};

static int __init capabilities_register_sysctl(void)
{
	struct ctl_table_header *caps_table_header;

	caps_table_header = register_sysctl_table(caps_root_table);
	if (!caps_table_header) {
		pr_err("Unable to register CAPABILITIES sysctl\n");
		return -ENOMEM;
	}

	return 0;
}

core_initcall(capabilities_register_sysctl);