// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/devtree.c
 *
 * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>

#include <asm/cputype.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>


#ifdef CONFIG_SMP
extern struct of_cpu_method __cpu_method_of_table[];

static const struct of_cpu_method __cpu_method_of_table_sentinel
	__used __section("__cpu_method_of_table_end");


static int __init set_smp_ops_by_method(struct device_node *node)
{
	const char *method;
	struct of_cpu_method *m = __cpu_method_of_table;

	if (of_property_read_string(node, "enable-method", &method))
		return 0;

	for (; m->method; m++)
		if (!strcmp(m->method, method)) {
			smp_set_ops(m->ops);
			return 1;
		}

	return 0;
}
#else
static inline int set_smp_ops_by_method(struct device_node *node)
{
	return 1;
}
#endif
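
/*
 * Illustration (hypothetical bindings, not from the kernel tree): entries in
 * __cpu_method_of_table are created by CPU_METHOD_OF_DECLARE() and matched
 * above against the "enable-method" string of a cpu node (or, as a fallback,
 * of the /cpus node), e.g.
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a9";
 *		reg = <0>;
 *		enable-method = "example,smp-method";
 *	};
 *
 * would be picked up by a platform registering something like
 *
 *	CPU_METHOD_OF_DECLARE(example_smp, "example,smp-method", &example_smp_ops);
 */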


/*
 * arm_dt_init_cpu_maps - Retrieve the cpu nodes from the device tree and
 * build the cpu logical map array containing the MPIDR values of the
 * logical cpus.
 *
 * Updates the cpu possible mask with the number of parsed cpu nodes.
 */
void __init arm_dt_init_cpu_maps(void)
{
	/*
	 * The temp logical map is initialized with MPIDR_INVALID values,
	 * which are invalid logical map entries since the logical map must
	 * contain a list of MPIDR[23:0] values where MPIDR[31:24] must
	 * read as 0.
	 */
	struct device_node *cpu, *cpus;
	int found_method = 0;
	u32 i, j, cpuidx = 1;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;

	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
	bool bootcpu_valid = false;
	cpus = of_find_node_by_path("/cpus");

	if (!cpus)
		return;

	for_each_of_cpu_node(cpu) {
		u32 hwid = of_get_cpu_hwid(cpu, 0);

		pr_debug(" * %pOF...\n", cpu);

		/*
		 * Bits [31:24] must be set to 0 in the DT since the reg
		 * property defines MPIDR[23:0] only.
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			of_node_put(cpu);
			return;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster.
		 * Scan all initialized entries and check for
		 * duplicates. If any is found, just bail out.
		 * Temp values were initialized to MPIDR_INVALID
		 * to avoid matching valid MPIDR[23:0] values.
		 */
		for (j = 0; j < cpuidx; j++)
			if (WARN(tmp_map[j] == hwid,
				 "Duplicate /cpu reg properties in the DT\n")) {
				of_node_put(cpu);
				return;
			}

		/*
		 * Build a stashed array of MPIDR values. The numbering scheme
		 * requires that, if detected, the boot CPU must be assigned
		 * logical id 0. Other CPUs get sequential indexes starting
		 * from 1. If a CPU node with a reg property matching the
		 * boot CPU MPIDR is detected, this is recorded so that the
		 * logical map built from DT is validated and can be used
		 * to override the map created in smp_setup_processor_id().
		 */
		if (hwid == mpidr) {
			i = 0;
			bootcpu_valid = true;
		} else {
			i = cpuidx++;
		}

		if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
					      "max cores %u, capping them\n",
					      cpuidx, nr_cpu_ids)) {
			cpuidx = nr_cpu_ids;
			of_node_put(cpu);
			break;
		}

		tmp_map[i] = hwid;

		if (!found_method)
			found_method = set_smp_ops_by_method(cpu);
	}

	/*
	 * Fall back to an enable-method in the cpus node if nothing was
	 * found in a cpu node.
	 */
	if (!found_method)
		set_smp_ops_by_method(cpus);

	if (!bootcpu_valid) {
		pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
		return;
	}

	/*
	 * Since the boot CPU node contains proper data, and all nodes have
	 * a reg property, the DT CPU list can be considered valid and the
	 * logical map created in smp_setup_processor_id() can be overridden.
	 */
	for (i = 0; i < cpuidx; i++) {
		set_cpu_possible(i, true);
		cpu_logical_map(i) = tmp_map[i];
		pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
	}
}
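
/*
 * Worked example (hypothetical values, not from the kernel tree): if the
 * boot CPU reads MPIDR[23:0] == 0x000100 and /cpus carries nodes with
 * reg = <0x000000>, <0x000100> and <0x000101> (in that order), the loop
 * above assigns logical id 0 to the node matching the boot CPU and numbers
 * the remaining nodes sequentially, so that after validation:
 *
 *	cpu_logical_map(0) == 0x000100	(boot CPU)
 *	cpu_logical_map(1) == 0x000000
 *	cpu_logical_map(2) == 0x000101
 */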

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

static const void * __init arch_get_next_mach(const char *const **match)
{
	static const struct machine_desc *mdesc = __arch_info_begin;
	const struct machine_desc *m = mdesc;

	if (m >= __arch_info_end)
		return NULL;

	mdesc++;
	*match = m->dt_compat;
	return m;
}
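
/*
 * Illustration (hypothetical board, not from the kernel tree):
 * arch_get_next_mach() walks the machine_desc entries between
 * __arch_info_begin and __arch_info_end and hands each entry's dt_compat
 * list to of_flat_dt_match_machine(), which picks the best match against
 * the root "compatible" property. A board contributes such an entry with
 * something like:
 *
 *	static const char *const example_board_compat[] __initconst = {
 *		"example,board",
 *		NULL,
 *	};
 *
 *	DT_MACHINE_START(EXAMPLE_BOARD, "Example board")
 *		.dt_compat	= example_board_compat,
 *	MACHINE_END
 */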

/**
 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
 * @dt_virt: virtual address of dt blob
 *
 * If a dtb was passed to the kernel in r2, then use it to choose the
 * correct machine_desc and to set up the system.
 */
const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
{
	const struct machine_desc *mdesc, *mdesc_best = NULL;

	DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
		.l2c_aux_val = 0x0,
		.l2c_aux_mask = ~0x0,
	MACHINE_END

	mdesc_best = &__mach_desc_GENERIC_DT;

	if (!dt_virt || !early_init_dt_verify(dt_virt, __pa(dt_virt)))
		return NULL;

	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);

	if (!mdesc) {
		const char *prop;
		int size;
		unsigned long dt_root;

		early_print("\nError: unrecognized/unsupported "
			    "device tree compatible list:\n[ ");

		dt_root = of_get_flat_dt_root();
		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		while (size > 0) {
			early_print("'%s' ", prop);
			size -= strlen(prop) + 1;
			prop += strlen(prop) + 1;
		}
		early_print("]\n\n");

		dump_machine_table(); /* does not return */
	}

	/* We really don't want to do this, but sometimes firmware provides buggy data */
	if (mdesc->dt_fixup)
		mdesc->dt_fixup();

	early_init_dt_scan_nodes();

	/* Change machine number to match the mdesc we're using */
	__machine_arch_type = mdesc->nr;

	return mdesc;
}
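
/*
 * Usage note (sketch only; details vary between kernel versions): on 32-bit
 * ARM the boot code saves the dtb pointer passed in r2, and setup_arch() in
 * arch/arm/kernel/setup.c calls this function with the mapped blob, roughly
 *
 *	mdesc = setup_machine_fdt(atags_vaddr);
 *
 * falling back to the ATAG path if no machine_desc is returned.
 */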