// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
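 * Per the arm64 boot protocol (Documentation/arm64/booting.rst), x0
 * holds the physical address of the device tree blob; x1..x3 are
 * reserved for future use and must be zero (setup_arch() warns at the
 * end of boot setup when they are not).
 */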
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	set_cpu_logical_map(0, mpidr);

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid hangs caused by
	 * using per-cpu variables too early; for example, lockdep will
	 * access a per-cpu variable inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}
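
/*
 * Example of the boot line printed above (the values are illustrative
 * only; a MIDR of 0x410fd083 would decode as a Cortex-A72 r0p3):
 *
 *	Booting Linux on physical CPU 0x0000000000 [0x410fd083]
 */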

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
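
/*
 * Worked example (illustrative): with four CPUs whose MPIDRs are 0x0,
 * 0x1, 0x100 and 0x101, the XOR pre-scan above yields mask = 0x101:
 * one bit toggles at Aff0 and one at Aff1 (fs[0] = fs[1] = 0,
 * bits[0] = bits[1] = 1). The shifts come out as shift_aff[0] = 0 and
 * shift_aff[1] = 8 + 0 - 1 = 7, so the index is
 * (mpidr & 0x1) | ((mpidr & 0x100) >> 7), mapping the four CPUs to the
 * collision-free indices 0, 1, 2 and 3, with mpidr_hash.bits = 2.
 *
 * The sketch below shows how a consumer would turn an MPIDR into such
 * an index. It is a C illustration only, not a real kernel helper: the
 * kernel performs the equivalent computation in assembly at
 * suspend/resume time (arch/arm64/kernel/sleep.S).
 *
 *	static u32 mpidr_hash_index(u64 mpidr)
 *	{
 *		u64 m = mpidr & mpidr_hash.mask;
 *
 *		return ((m & GENMASK_ULL(7, 0))   >> mpidr_hash.shift_aff[0]) |
 *		       ((m & GENMASK_ULL(15, 8))  >> mpidr_hash.shift_aff[1]) |
 *		       ((m & GENMASK_ULL(23, 16)) >> mpidr_hash.shift_aff[2]) |
 *		       ((m & GENMASK_ULL(39, 32)) >> mpidr_hash.shift_aff[3]);
 *	}
 */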

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.\n",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_memblock(memory, region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}
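
/*
 * Illustrative sketch only: after the loop above, /proc/iomem shows one
 * "System RAM" entry per mapped memblock region (nomap regions appear
 * as top-level "reserved" entries), with the kernel and crash-kernel
 * windows nested inside. The addresses below are made up for the
 * example:
 *
 *	40000000-bfffffff : System RAM
 *	  40210000-40f6ffff : Kernel code
 *	  41190000-412a5fff : Kernel data
 *	  60000000-67ffffff : Crash kernel
 */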

static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_region(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

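/* Map of logical CPU numbers to hardware MPIDR values, filled in at boot. */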
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(int cpu)
{
	return __cpu_logical_map[cpu];
}

void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI then use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon, so that any pending System Error is reported as soon
	 * as we are able to log it.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();

	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
		pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!\n");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after cpus are set up. */
	kasan_init_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}

static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();
	dump_cpu_features();
	dump_mem_limit();
	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);