// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

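/*
 * Return the address just past the end of the last LMB in the
 * drmem_info array (base address plus the LMB size), i.e. the upper
 * bound of the dynamically reconfigurable memory described here.
 */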
u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

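/*
 * Allocate a new property with the same name as @prop and a zeroed
 * value buffer of @prop_sz bytes. Used when building replacement
 * dynamic memory properties; returns NULL on allocation failure.
 */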
static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

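/*
 * Rebuild the ibm,dynamic-memory property from the in-memory LMB array.
 * The v1 layout is a count followed by one of_drconf_cell_v1 entry per
 * LMB (base address, DRC index, associativity index, flags), all stored
 * big-endian with the internal reserved flag bit cleared.
 */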
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

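/*
 * Rebuild the ibm,dynamic-memory-v2 property. The v2 layout compresses
 * runs of consecutive LMBs that share an associativity index and flags
 * into a single of_drconf_cell_v2 entry carrying a sequence count, so
 * the new property is sized and populated in two passes over the LMBs.
 */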
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

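/*
 * Regenerate the dynamic memory property under
 * /ibm,dynamic-reconfiguration-memory, using the v1 or v2 encoding
 * depending on which property the device tree already contains.
 * Returns 0 on success, -1 if the node or property is missing or the
 * replacement property cannot be allocated.
 */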
int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}

	of_node_put(memory);
	return rc;
}

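/*
 * Decode one big-endian v1 cell from *prop into @lmb and advance *prop
 * past it. The reserved field between the DRC index and the
 * associativity index is skipped.
 */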
static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

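/*
 * Walk the v1 property, decoding one cell per LMB into a temporary
 * drmem_lmb and calling @func on each, passing along the optional
 * linux,drconf-usable-memory cursor.
 */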
static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;

	n_lmbs = of_read_number(prop++, 1);
	if (n_lmbs == 0)
		return;

	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		func(&lmb, &usm);
	}
}

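/*
 * Decode one big-endian v2 cell from *prop into @dr_cell (sequence
 * count, starting base address, starting DRC index, associativity index
 * and flags) and advance *prop past it.
 */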
static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

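/*
 * Expand each v2 LMB set back into individual LMBs, stepping the base
 * address by the LMB size and incrementing the DRC index for each one,
 * and invoke @func on every LMB with the optional usable-memory cursor.
 */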
static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			func(&lmb, &usm);
		}
	}
}

#ifdef CONFIG_PPC_PSERIES
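/*
 * Flattened-device-tree variant of the LMB walk, used during early boot
 * before the unflattened tree is available. Reads the LMB size and, if
 * present, the usable-memory property, then walks whichever of the v1
 * or v2 dynamic memory properties the node provides.
 */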
void __init walk_drmem_lmbs_early(unsigned long node,
			void (*func)(struct drmem_lmb *, const __be32 **))
{
	const __be32 *prop, *usm;
	int len;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		__walk_drmem_v1_lmbs(prop, usm, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			__walk_drmem_v2_lmbs(prop, usm, func);
	}

	memblock_dump_all();
}

#endif

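/*
 * Cache the LMB size from the ibm,lmb-size property of @dn in
 * drmem_info, unless it has already been set. Returns 0 on success,
 * -1 if the property is missing or too short.
 */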
static int __init init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

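/*
 * Unflattened-device-tree variant of the LMB walk. Initializes the LMB
 * size, looks up the optional usable-memory property, and calls @func
 * for each LMB described by the v1 or v2 dynamic memory property.
 */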
void __init walk_drmem_lmbs(struct device_node *dn,
			    void (*func)(struct drmem_lmb *, const __be32 **))
{
	const __be32 *prop, *usm;

	if (init_drmem_lmb_size(dn))
		return;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		__walk_drmem_v1_lmbs(prop, usm, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			__walk_drmem_v2_lmbs(prop, usm, func);
	}
}

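/*
 * Allocate drmem_info->lmbs and fill it with one entry per v1 cell,
 * assigning the node id for each LMB as it is read.
 */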
static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb) {
		read_drconf_v1_cell(lmb, &prop);
		lmb_set_nid(lmb);
	}
}

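/*
 * Build drmem_info->lmbs from the v2 property. A first pass over the
 * LMB sets totals the number of LMBs so the array can be allocated, and
 * a second pass expands each set into individual entries, assigning the
 * node id as it goes.
 */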
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;

			lmb_set_nid(lmb);
		}
	}
}

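/*
 * Late initcall that locates the /ibm,dynamic-reconfiguration-memory
 * node, reads the LMB size and populates drmem_info from whichever
 * dynamic memory property (v1 or v2) the device tree provides.
 */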
static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);