// SPDX-License-Identifier: GPL-2.0
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/sparsemem.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/asm.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"

struct physmem_info __bootdata(physmem_info);
static unsigned int physmem_alloc_ranges;
static unsigned long physmem_alloc_pos;

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct physmem_range))

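/*
 * Return a pointer to the n'th online memory range. The first
 * MEM_INLINED_ENTRIES ranges are kept inline in struct physmem_info;
 * entries beyond that live in a lazily allocated extended array.
 */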
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &physmem_info.online[n];
	if (unlikely(!physmem_info.online_extended)) {
		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
			RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
			physmem_alloc_pos, true);
	}
	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_physmem_online_range() with adjacent memory
 * ranges are merged together into a single memory range.
 */
void add_physmem_online_range(u64 start, u64 end)
{
	struct physmem_range *range;

	if (physmem_info.range_count) {
		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
		if (range->end == start) {
			range->end = end;
			return;
		}
	}

	range = __get_physmem_range_ptr(physmem_info.range_count);
	range->start = start;
	range->end = end;
	physmem_info.range_count++;
}

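/*
 * Issue diagnose 0x260 with ry = 0x10 (storage configuration). A temporary
 * program check PSW pointing behind the diag instruction is installed so
 * that an unsupported diagnose is survived. Returns the value left in ry
 * on condition code 0, -1 on an exception or any other condition code.
 */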
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	int cc, exception;
	psw_t old;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	exception = 1;
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	lhi	%[exc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc),
		  [exc] "+d" (exception),
		  [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [ry] "+&d" (ry),
		  "+Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw)
		: CC_CLOBBER_LIST("memory"));
	cc = exception ? -1 : CC_TRANSFORM(cc);
	return cc == 0 ? ry : -1;
}

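/*
 * Detect online memory via diagnose 0x260 and register every reported
 * storage extent as an online memory range. The extents carry inclusive
 * end addresses, hence the "end + 1" conversion below.
 */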
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

#define DIAG500_SC_STOR_LIMIT 4

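/*
 * Query the storage limit via diagnose 0x500 subcode DIAG500_SC_STOR_LIMIT.
 * The limit is returned in register 2 as an inclusive end address and is
 * converted to an exclusive end before being passed back to the caller.
 */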
static int diag500_storage_limit(unsigned long *max_physmem_end)
{
	unsigned long storage_limit;
	unsigned long reg1, reg2;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	lghi	1,%[subcode]\n"
		"	lghi	2,0\n"
		"	diag	2,4,0x500\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		"	lgr	%[slimit],2\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [slimit] "=d" (storage_limit),
		  "=Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [subcode] "i" (DIAG500_SC_STOR_LIMIT)
		: "memory", "1", "2");
	if (!storage_limit)
		return -EINVAL;
	/* Convert inclusive end to exclusive end */
	*max_physmem_end = storage_limit + 1;
	return 0;
}

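/*
 * Test whether a real address is accessible by executing TEST PROTECTION.
 * Returns the tprot condition code, or -EFAULT if the instruction caused
 * a program check (i.e. the address is not part of the configuration).
 */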
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int cc, exception;
	psw_t old;

	exception = 1;
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	lhi	%[exc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc),
		  [exc] "+d" (exception),
		  [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  "=Q" (get_lowcore()->program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [addr] "a" (addr)
		: CC_CLOBBER_LIST("memory"));
	cc = exception ? -EFAULT : CC_TRANSFORM(cc);
	return cc;
}

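/*
 * Find the end of physical memory by binary searching in 1MB blocks:
 * the highest block that tprot() can access determines the memory size.
 */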
static unsigned long search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}
	return (offset + 1) << 20;
}

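/*
 * Determine the maximum physical memory address. Preference order:
 * diag500 storage limit, SCLP read info, binary search via tprot().
 */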
unsigned long detect_max_physmem_end(void)
{
	unsigned long max_physmem_end = 0;

	if (!diag500_storage_limit(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_DIAG500_STOR_LIMIT;
	} else if (!sclp_early_get_memsize(&max_physmem_end)) {
		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
	} else {
		max_physmem_end = search_mem_end();
		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
	}
	return max_physmem_end;
}

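/*
 * Populate the list of online memory ranges. SCLP storage info is
 * preferred; otherwise the online range is derived from the SCLP memory
 * size (diag500 case), diag260, or a single 0..max_physmem_end range.
 */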
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
	if (!sclp_early_read_storage_info()) {
		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
	} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
		unsigned long online_end;

		if (!sclp_early_get_memsize(&online_end)) {
			physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
			add_physmem_online_range(0, online_end);
		}
	} else if (!diag260()) {
		physmem_info.info_source = MEM_DETECT_DIAG260;
	} else if (max_physmem_end) {
		add_physmem_online_range(0, max_physmem_end);
	}
}

void physmem_set_usable_limit(unsigned long limit)
{
	physmem_info.usable = limit;
	physmem_alloc_pos = limit;
}

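/*
 * Report an out-of-memory condition: print the kernel version, command
 * line, reserved and usable memory ranges plus a stack trace, then halt.
 */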
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
{
	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
	struct reserved_range *range;
	enum reserved_range_type t;
	int i;

	boot_printk("Linux version %s\n", kernel_version);
	if (!is_prot_virt_guest() && early_command_line[0])
		boot_printk("Kernel command line: %s\n", early_command_line);
	boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
		    size, align, min, max);
	boot_printk("Reserved memory ranges:\n");
	for_each_physmem_reserved_range(t, range, &start, &end) {
		boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
		total_reserved_mem += end - start;
	}
	boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
		    get_physmem_info_source(), physmem_info.info_source);
	for_each_physmem_usable_range(i, &start, &end) {
		boot_printk("%016lx %016lx\n", start, end);
		total_mem += end - start;
	}
	boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
		    total_mem, total_reserved_mem,
		    total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
	print_stacktrace(current_frame_address());
	boot_printk("\n\n -- System halted\n");
	disabled_wait();
}

void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
	physmem_info.reserved[type].start = addr;
	physmem_info.reserved[type].end = addr + size;
}

void physmem_free(enum reserved_range_type type)
{
	physmem_info.reserved[type].start = 0;
	physmem_info.reserved[type].end = 0;
}

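/*
 * Check whether [addr, addr + size) overlaps a reserved range or the ipl
 * report certificate area; if so, report where the intersection starts.
 */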
static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
				       unsigned long *intersection_start)
{
	unsigned long res_addr, res_size;
	int t;

	for (t = 0; t < RR_MAX; t++) {
		if (!get_physmem_reserved(t, &res_addr, &res_size))
			continue;
		if (intersects(addr, size, res_addr, res_size)) {
			*intersection_start = res_addr;
			return true;
		}
	}
	return ipl_report_certs_intersects(addr, size, intersection_start);
}

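/*
 * Walk the online memory ranges from the top down and find the highest
 * aligned address below "max" (and above "min") at which "size" bytes fit
 * without intersecting a reserved region. Returns 0 when nothing fits,
 * unless die_on_oom is set.
 */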
static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
					   unsigned long min, unsigned long max,
					   unsigned int from_ranges, unsigned int *ranges_left,
					   bool die_on_oom)
{
	unsigned int nranges = from_ranges ?: physmem_info.range_count;
	unsigned long range_start, range_end;
	unsigned long intersection_start;
	unsigned long addr, pos = max;

	align = max(align, 8UL);
	while (nranges) {
		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
		pos = min(range_end, pos);

		if (round_up(min, align) + size > pos)
			break;
		addr = round_down(pos - size, align);
		if (range_start > addr) {
			nranges--;
			continue;
		}
		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
			pos = intersection_start;
			continue;
		}

		if (ranges_left)
			*ranges_left = nranges;
		return addr;
	}
	if (die_on_oom)
		die_oom(size, align, min, max);
	return 0;
}

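/*
 * Allocate memory top-down within [min, max] and record the result as
 * reserved range "type". Used for example by __get_physmem_range_ptr()
 * to allocate the extended online range array.
 */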
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
				  unsigned long align, unsigned long min, unsigned long max,
				  bool die_on_oom)
{
	unsigned long addr;

	max = min(max, physmem_alloc_pos);
	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
	if (addr)
		physmem_reserve(type, addr, size);
	return addr;
}

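/*
 * Allocate memory top-down just below the current allocation position.
 * A consecutive allocation of the same type simply extends the existing
 * reserved range downwards; otherwise the previous range is preserved in
 * a newly allocated, chained struct reserved_range before being reused.
 */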
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
				     unsigned long align)
{
	struct reserved_range *range = &physmem_info.reserved[type];
	struct reserved_range *new_range;
	unsigned int ranges_left;
	unsigned long addr;

	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
				     &ranges_left, true);
	/* not a consecutive allocation of the same type, or the first allocation */
	if (range->start != addr + size) {
		if (range->end) {
			physmem_alloc_pos = __physmem_alloc_range(
				sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
				physmem_alloc_ranges, &ranges_left, true);
			new_range = (struct reserved_range *)physmem_alloc_pos;
			*new_range = *range;
			range->chain = new_range;
			addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
						     ranges_left, &ranges_left, true);
		}
		range->end = addr + size;
	}
	range->start = addr;
	physmem_alloc_pos = addr;
	physmem_alloc_ranges = ranges_left;
	return addr;
}

unsigned long get_physmem_alloc_pos(void)
{
	return physmem_alloc_pos;
}