Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Dynamic reconfiguration memory support
  4 *
  5 * Copyright 2017 IBM Corporation
  6 */
  7
  8#define pr_fmt(fmt) "drmem: " fmt
  9
 10#include <linux/kernel.h>
 11#include <linux/of.h>
 12#include <linux/of_fdt.h>
 13#include <linux/memblock.h>
 14#include <asm/prom.h>
 15#include <asm/drmem.h>
 16
 
 
/* Backing storage for the LMB array metadata; all users go through drmem_info. */
static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
 19
 20u64 drmem_lmb_memory_max(void)
 21{
 22	struct drmem_lmb *last_lmb;
 23
 24	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
 25	return last_lmb->base_addr + drmem_lmb_size();
 26}
 27
 28static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
 29{
 30	/*
 31	 * Return the value of the lmb flags field minus the reserved
 32	 * bit used internally for hotplug processing.
 33	 */
 34	return lmb->flags & ~DRMEM_LMB_RESERVED;
 35}
 36
 37static struct property *clone_property(struct property *prop, u32 prop_sz)
 38{
 39	struct property *new_prop;
 40
 41	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
 42	if (!new_prop)
 43		return NULL;
 44
 45	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
 46	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
 47	if (!new_prop->name || !new_prop->value) {
 48		kfree(new_prop->name);
 49		kfree(new_prop->value);
 50		kfree(new_prop);
 51		return NULL;
 52	}
 53
 54	new_prop->length = prop_sz;
 55#if defined(CONFIG_OF_DYNAMIC)
 56	of_property_set_flag(new_prop, OF_DYNAMIC);
 57#endif
 58	return new_prop;
 59}
 60
 61static int drmem_update_dt_v1(struct device_node *memory,
 62			      struct property *prop)
 63{
 64	struct property *new_prop;
 65	struct of_drconf_cell_v1 *dr_cell;
 66	struct drmem_lmb *lmb;
 67	u32 *p;
 68
 69	new_prop = clone_property(prop, prop->length);
 70	if (!new_prop)
 71		return -1;
 72
 73	p = new_prop->value;
 74	*p++ = cpu_to_be32(drmem_info->n_lmbs);
 75
 76	dr_cell = (struct of_drconf_cell_v1 *)p;
 77
 78	for_each_drmem_lmb(lmb) {
 79		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
 80		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
 81		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
 82		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 83
 84		dr_cell++;
 85	}
 86
 87	of_update_property(memory, new_prop);
 88	return 0;
 89}
 90
 91static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
 92				struct drmem_lmb *lmb)
 93{
 94	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
 95	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
 96	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
 97	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 98}
 99
/*
 * Rebuild the ibm,dynamic-memory-v2 property from the in-kernel LMB
 * array and install it on @memory.  v2 compresses runs of consecutive
 * LMBs that share the same aa_index and flags into a single "LMB set"
 * cell.  Returns 0 on success, -1 on allocation failure.
 */
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		/* A new set starts whenever aa_index or flags change. */
		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	/* One leading count cell, then one v2 cell per set. */
	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}
168
169int drmem_update_dt(void)
170{
171	struct device_node *memory;
172	struct property *prop;
173	int rc = -1;
174
175	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
176	if (!memory)
177		return -1;
178
179	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
180	if (prop) {
181		rc = drmem_update_dt_v1(memory, prop);
182	} else {
183		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
184		if (prop)
185			rc = drmem_update_dt_v2(memory, prop);
186	}
187
188	of_node_put(memory);
189	return rc;
190}
191
192static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
193				       const __be32 **prop)
194{
195	const __be32 *p = *prop;
196
197	lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
 
198	lmb->drc_index = of_read_number(p++, 1);
199
200	p++; /* skip reserved field */
201
202	lmb->aa_index = of_read_number(p++, 1);
203	lmb->flags = of_read_number(p++, 1);
204
205	*prop = p;
206}
207
208static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
209			void (*func)(struct drmem_lmb *, const __be32 **))
 
210{
211	struct drmem_lmb lmb;
212	u32 i, n_lmbs;
 
213
214	n_lmbs = of_read_number(prop++, 1);
215	if (n_lmbs == 0)
216		return;
217
218	for (i = 0; i < n_lmbs; i++) {
219		read_drconf_v1_cell(&lmb, &prop);
220		func(&lmb, &usm);
 
 
221	}
 
 
222}
223
224static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
225				       const __be32 **prop)
226{
227	const __be32 *p = *prop;
228
229	dr_cell->seq_lmbs = of_read_number(p++, 1);
230	dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
 
231	dr_cell->drc_index = of_read_number(p++, 1);
232	dr_cell->aa_index = of_read_number(p++, 1);
233	dr_cell->flags = of_read_number(p++, 1);
234
235	*prop = p;
236}
237
/*
 * Walk the v2 ibm,dynamic-memory-v2 property, expanding each LMB set
 * back into individual LMBs and invoking @func for every one with a
 * stack-local decoded copy.  Within a set, successive LMBs have
 * contiguous base addresses (stride drmem_lmb_size()) and sequential
 * DRC indexes, so the set cell is advanced in place as it is expanded.
 */
static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			func(&lmb, &usm);
		}
	}
}
266
267#ifdef CONFIG_PPC_PSERIES
/*
 * Early-boot (flat device tree) LMB walk: read the LMB size from
 * @node's ibm,lmb-size property, then invoke @func for every LMB
 * described by ibm,dynamic-memory or ibm,dynamic-memory-v2.  Silently
 * returns if the LMB size property is missing or too short.
 */
void __init walk_drmem_lmbs_early(unsigned long node,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	const __be32 *prop, *usm;
	int len;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	/* Present only in kexec/kdump kernels (added by kexec-tools). */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		__walk_drmem_v1_lmbs(prop, usm, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			__walk_drmem_v2_lmbs(prop, usm, func);
	}

	memblock_dump_all();
}
294
295#endif
296
297static int __init init_drmem_lmb_size(struct device_node *dn)
298{
299	const __be32 *prop;
300	int len;
301
302	if (drmem_info->lmb_size)
303		return 0;
304
305	prop = of_get_property(dn, "ibm,lmb-size", &len);
306	if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
307		pr_info("Could not determine LMB size\n");
308		return -1;
309	}
310
311	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
312	return 0;
313}
314
315/*
316 * Returns the property linux,drconf-usable-memory if
317 * it exists (the property exists only in kexec/kdump kernels,
318 * added by kexec-tools)
319 */
320static const __be32 *of_get_usable_memory(struct device_node *dn)
321{
322	const __be32 *prop;
323	u32 len;
324
325	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
326	if (!prop || len < sizeof(unsigned int))
327		return NULL;
328
329	return prop;
330}
331
332void __init walk_drmem_lmbs(struct device_node *dn,
333			    void (*func)(struct drmem_lmb *, const __be32 **))
334{
335	const __be32 *prop, *usm;
 
 
 
 
 
 
 
 
 
 
336
337	if (init_drmem_lmb_size(dn))
338		return;
339
340	usm = of_get_usable_memory(dn);
341
342	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
343	if (prop) {
344		__walk_drmem_v1_lmbs(prop, usm, func);
345	} else {
346		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
347		if (prop)
348			__walk_drmem_v2_lmbs(prop, usm, func);
349	}
 
 
350}
351
352static void __init init_drmem_v1_lmbs(const __be32 *prop)
353{
354	struct drmem_lmb *lmb;
355
356	drmem_info->n_lmbs = of_read_number(prop++, 1);
357	if (drmem_info->n_lmbs == 0)
358		return;
359
360	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
361				   GFP_KERNEL);
362	if (!drmem_info->lmbs)
363		return;
364
365	for_each_drmem_lmb(lmb) {
366		read_drconf_v1_cell(lmb, &prop);
367		lmb_set_nid(lmb);
368	}
369}
370
/*
 * Allocate and populate drmem_info->lmbs from a v2
 * ibm,dynamic-memory-v2 property.  The property is scanned twice: once
 * to total the LMB count across all sets, then again to expand each set
 * into individual LMB entries (contiguous base addresses, sequential
 * DRC indexes).  Returns silently on an empty property or allocation
 * failure.
 */
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;

			lmb_set_nid(lmb);
		}
	}
}
418
419static int __init drmem_init(void)
420{
421	struct device_node *dn;
422	const __be32 *prop;
423
424	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
425	if (!dn) {
426		pr_info("No dynamic reconfiguration memory found\n");
427		return 0;
428	}
429
430	if (init_drmem_lmb_size(dn)) {
431		of_node_put(dn);
432		return 0;
433	}
434
435	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
436	if (prop) {
437		init_drmem_v1_lmbs(prop);
438	} else {
439		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
440		if (prop)
441			init_drmem_v2_lmbs(prop);
442	}
443
444	of_node_put(dn);
445	return 0;
446}
447late_initcall(drmem_init);
v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Dynamic reconfiguration memory support
  4 *
  5 * Copyright 2017 IBM Corporation
  6 */
  7
  8#define pr_fmt(fmt) "drmem: " fmt
  9
 10#include <linux/kernel.h>
 11#include <linux/of.h>
 12#include <linux/of_fdt.h>
 13#include <linux/memblock.h>
 14#include <asm/prom.h>
 15#include <asm/drmem.h>
 16
/* Root node #address-cells / #size-cells, cached by the walk entry points. */
static int n_root_addr_cells, n_root_size_cells;

/* Backing storage for the LMB array metadata; all users go through drmem_info. */
static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
 21
 22u64 drmem_lmb_memory_max(void)
 23{
 24	struct drmem_lmb *last_lmb;
 25
 26	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
 27	return last_lmb->base_addr + drmem_lmb_size();
 28}
 29
 30static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
 31{
 32	/*
 33	 * Return the value of the lmb flags field minus the reserved
 34	 * bit used internally for hotplug processing.
 35	 */
 36	return lmb->flags & ~DRMEM_LMB_RESERVED;
 37}
 38
 39static struct property *clone_property(struct property *prop, u32 prop_sz)
 40{
 41	struct property *new_prop;
 42
 43	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
 44	if (!new_prop)
 45		return NULL;
 46
 47	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
 48	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
 49	if (!new_prop->name || !new_prop->value) {
 50		kfree(new_prop->name);
 51		kfree(new_prop->value);
 52		kfree(new_prop);
 53		return NULL;
 54	}
 55
 56	new_prop->length = prop_sz;
 57#if defined(CONFIG_OF_DYNAMIC)
 58	of_property_set_flag(new_prop, OF_DYNAMIC);
 59#endif
 60	return new_prop;
 61}
 62
 63static int drmem_update_dt_v1(struct device_node *memory,
 64			      struct property *prop)
 65{
 66	struct property *new_prop;
 67	struct of_drconf_cell_v1 *dr_cell;
 68	struct drmem_lmb *lmb;
 69	u32 *p;
 70
 71	new_prop = clone_property(prop, prop->length);
 72	if (!new_prop)
 73		return -1;
 74
 75	p = new_prop->value;
 76	*p++ = cpu_to_be32(drmem_info->n_lmbs);
 77
 78	dr_cell = (struct of_drconf_cell_v1 *)p;
 79
 80	for_each_drmem_lmb(lmb) {
 81		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
 82		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
 83		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
 84		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 85
 86		dr_cell++;
 87	}
 88
 89	of_update_property(memory, new_prop);
 90	return 0;
 91}
 92
 93static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
 94				struct drmem_lmb *lmb)
 95{
 96	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
 97	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
 98	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
 99	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
100}
101
/*
 * Rebuild the ibm,dynamic-memory-v2 property from the in-kernel LMB
 * array and install it on @memory.  v2 compresses runs of consecutive
 * LMBs that share the same aa_index and flags into a single "LMB set"
 * cell.  Returns 0 on success, -1 on allocation failure.
 */
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		/* A new set starts whenever aa_index or flags change. */
		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	/* One leading count cell, then one v2 cell per set. */
	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}
170
171int drmem_update_dt(void)
172{
173	struct device_node *memory;
174	struct property *prop;
175	int rc = -1;
176
177	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
178	if (!memory)
179		return -1;
180
181	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
182	if (prop) {
183		rc = drmem_update_dt_v1(memory, prop);
184	} else {
185		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
186		if (prop)
187			rc = drmem_update_dt_v2(memory, prop);
188	}
189
190	of_node_put(memory);
191	return rc;
192}
193
194static void read_drconf_v1_cell(struct drmem_lmb *lmb,
195				       const __be32 **prop)
196{
197	const __be32 *p = *prop;
198
199	lmb->base_addr = of_read_number(p, n_root_addr_cells);
200	p += n_root_addr_cells;
201	lmb->drc_index = of_read_number(p++, 1);
202
203	p++; /* skip reserved field */
204
205	lmb->aa_index = of_read_number(p++, 1);
206	lmb->flags = of_read_number(p++, 1);
207
208	*prop = p;
209}
210
211static int
212__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
213		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
214{
215	struct drmem_lmb lmb;
216	u32 i, n_lmbs;
217	int ret = 0;
218
219	n_lmbs = of_read_number(prop++, 1);
 
 
 
220	for (i = 0; i < n_lmbs; i++) {
221		read_drconf_v1_cell(&lmb, &prop);
222		ret = func(&lmb, &usm, data);
223		if (ret)
224			break;
225	}
226
227	return ret;
228}
229
230static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
231				       const __be32 **prop)
232{
233	const __be32 *p = *prop;
234
235	dr_cell->seq_lmbs = of_read_number(p++, 1);
236	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
237	p += n_root_addr_cells;
238	dr_cell->drc_index = of_read_number(p++, 1);
239	dr_cell->aa_index = of_read_number(p++, 1);
240	dr_cell->flags = of_read_number(p++, 1);
241
242	*prop = p;
243}
244
245static int
246__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
247		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
248{
249	struct of_drconf_cell_v2 dr_cell;
250	struct drmem_lmb lmb;
251	u32 i, j, lmb_sets;
252	int ret = 0;
253
254	lmb_sets = of_read_number(prop++, 1);
 
 
 
255	for (i = 0; i < lmb_sets; i++) {
256		read_drconf_v2_cell(&dr_cell, &prop);
257
258		for (j = 0; j < dr_cell.seq_lmbs; j++) {
259			lmb.base_addr = dr_cell.base_addr;
260			dr_cell.base_addr += drmem_lmb_size();
261
262			lmb.drc_index = dr_cell.drc_index;
263			dr_cell.drc_index++;
264
265			lmb.aa_index = dr_cell.aa_index;
266			lmb.flags = dr_cell.flags;
267
268			ret = func(&lmb, &usm, data);
269			if (ret)
270				break;
271		}
272	}
273
274	return ret;
275}
276
277#ifdef CONFIG_PPC_PSERIES
/*
 * Early-boot (flat device tree) LMB walk: cache the root node's
 * address/size cell counts, read the LMB size from @node's
 * ibm,lmb-size property, then invoke @func for every LMB described by
 * ibm,dynamic-memory or ibm,dynamic-memory-v2.  Returns -ENODEV if no
 * usable property is found, otherwise the walk's result.
 */
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
		int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	/* Present only in kexec/kdump kernels (added by kexec-tools). */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}
309
310#endif
311
312static int init_drmem_lmb_size(struct device_node *dn)
313{
314	const __be32 *prop;
315	int len;
316
317	if (drmem_info->lmb_size)
318		return 0;
319
320	prop = of_get_property(dn, "ibm,lmb-size", &len);
321	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
322		pr_info("Could not determine LMB size\n");
323		return -1;
324	}
325
326	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
327	return 0;
328}
329
330/*
331 * Returns the property linux,drconf-usable-memory if
332 * it exists (the property exists only in kexec/kdump kernels,
333 * added by kexec-tools)
334 */
335static const __be32 *of_get_usable_memory(struct device_node *dn)
336{
337	const __be32 *prop;
338	u32 len;
339
340	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
341	if (!prop || len < sizeof(unsigned int))
342		return NULL;
343
344	return prop;
345}
346
/*
 * Walk all LMBs described by @dn's dynamic memory property (v1 or v2),
 * calling @func for each.  The root node's #address-cells and
 * #size-cells are cached first, since the cell readers and the LMB
 * size lookup depend on them.  Returns -ENODEV if the root node or a
 * usable property is missing, otherwise the walk's result.
 */
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	/* Present only in kexec/kdump kernels (added by kexec-tools). */
	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}
378
379static void __init init_drmem_v1_lmbs(const __be32 *prop)
380{
381	struct drmem_lmb *lmb;
382
383	drmem_info->n_lmbs = of_read_number(prop++, 1);
384	if (drmem_info->n_lmbs == 0)
385		return;
386
387	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
388				   GFP_KERNEL);
389	if (!drmem_info->lmbs)
390		return;
391
392	for_each_drmem_lmb(lmb) {
393		read_drconf_v1_cell(lmb, &prop);
394		lmb_set_nid(lmb);
395	}
396}
397
/*
 * Allocate and populate drmem_info->lmbs from a v2
 * ibm,dynamic-memory-v2 property.  The property is scanned twice: once
 * to total the LMB count across all sets, then again to expand each set
 * into individual LMB entries (contiguous base addresses, sequential
 * DRC indexes).  Returns silently on an empty property or allocation
 * failure.
 */
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;

			lmb_set_nid(lmb);
		}
	}
}
445
446static int __init drmem_init(void)
447{
448	struct device_node *dn;
449	const __be32 *prop;
450
451	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
452	if (!dn) {
453		pr_info("No dynamic reconfiguration memory found\n");
454		return 0;
455	}
456
457	if (init_drmem_lmb_size(dn)) {
458		of_node_put(dn);
459		return 0;
460	}
461
462	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
463	if (prop) {
464		init_drmem_v1_lmbs(prop);
465	} else {
466		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
467		if (prop)
468			init_drmem_v2_lmbs(prop);
469	}
470
471	of_node_put(dn);
472	return 0;
473}
474late_initcall(drmem_init);