/* NOTE(review): page-capture artifact removed; this file contains two revisions of arch/s390/boot/mem_detect.c concatenated. */
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/errno.h>
3#include <linux/init.h>
4#include <asm/setup.h>
5#include <asm/processor.h>
6#include <asm/sclp.h>
7#include <asm/sections.h>
8#include <asm/mem_detect.h>
9#include <asm/sparsemem.h>
10#include "decompressor.h"
11#include "boot.h"
12
13struct mem_detect_info __bootdata(mem_detect);
14
15/* up to 256 storage elements, 1020 subincrements each */
16#define ENTRIES_EXTENDED_MAX \
17 (256 * (1020 / 2) * sizeof(struct mem_detect_block))
18
19/*
20 * To avoid corrupting old kernel memory during dump, find lowest memory
21 * chunk possible either right after the kernel end (decompressed kernel) or
22 * after initrd (if it is present and there is no hole between the kernel end
23 * and initrd)
24 */
25static void *mem_detect_alloc_extended(void)
26{
27 unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
28
29 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
30 initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
31 offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
32
33 return (void *)offset;
34}
35
36static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
37{
38 if (n < MEM_INLINED_ENTRIES)
39 return &mem_detect.entries[n];
40 if (unlikely(!mem_detect.entries_extended))
41 mem_detect.entries_extended = mem_detect_alloc_extended();
42 return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
43}
44
45/*
46 * sequential calls to add_mem_detect_block with adjacent memory areas
47 * are merged together into single memory block.
48 */
49void add_mem_detect_block(u64 start, u64 end)
50{
51 struct mem_detect_block *block;
52
53 if (mem_detect.count) {
54 block = __get_mem_detect_block_ptr(mem_detect.count - 1);
55 if (block->end == start) {
56 block->end = end;
57 return;
58 }
59 }
60
61 block = __get_mem_detect_block_ptr(mem_detect.count);
62 block->start = start;
63 block->end = end;
64 mem_detect.count++;
65}
66
/*
 * Issue DIAGNOSE 0x260 subcode 0x10 (storage configuration) with the
 * buffer described by rx1/rx2. A temporary program-check PSW pointing at
 * label "1:" is installed around the diag so that an unsupported or
 * failing instruction faults back gracefully instead of killing the boot.
 * Returns the value left in ry on success (used by the caller as the
 * number of filled extents — presumably; confirm against DIAG 0x260
 * documentation), or -1 on failure.
 */
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd = rx2;
	ry = 0x10; /* storage configuration */
	rc = -1; /* fail */
	asm volatile(
		/* save current program-check new PSW, install our own */
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		/* only reached without a program check: rc = condition code */
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		/* restore original program-check new PSW */
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}
101
102static int diag260(void)
103{
104 int rc, i;
105
106 struct {
107 unsigned long start;
108 unsigned long end;
109 } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
110
111 memset(storage_extents, 0, sizeof(storage_extents));
112 rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
113 if (rc == -1)
114 return -1;
115
116 for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
117 add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
118 return 0;
119}
120
/*
 * Probe whether the page at addr is accessible using TEST PROTECTION.
 * A temporary program-check PSW pointing at label "1:" is installed so
 * that an addressing exception (address beyond installed memory) skips
 * the condition-code extraction and leaves rc at -EFAULT. Otherwise rc
 * becomes the tprot condition code (0..3).
 */
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		/* save current program-check new PSW, install our own */
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		/* only reached without a program check */
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		/* restore original program-check new PSW */
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  /* NOTE(review): output constraint names only .addr while the
		   * asm rewrites the full 16-byte PSW — confirm intended */
		  "=Q" (S390_lowcore.program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}
149
150static void search_mem_end(void)
151{
152 unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
153 unsigned long offset = 0;
154 unsigned long pivot;
155
156 while (range > 1) {
157 range >>= 1;
158 pivot = offset + range;
159 if (!tprot(pivot << 20))
160 offset = pivot;
161 }
162
163 add_mem_detect_block(0, (offset + 1) << 20);
164}
165
166unsigned long detect_memory(void)
167{
168 unsigned long max_physmem_end;
169
170 sclp_early_get_memsize(&max_physmem_end);
171
172 if (!sclp_early_read_storage_info()) {
173 mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
174 return max_physmem_end;
175 }
176
177 if (!diag260()) {
178 mem_detect.info_source = MEM_DETECT_DIAG260;
179 return max_physmem_end;
180 }
181
182 if (max_physmem_end) {
183 add_mem_detect_block(0, max_physmem_end);
184 mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
185 return max_physmem_end;
186 }
187
188 search_mem_end();
189 mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
190 return get_mem_detect_end();
191}
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/errno.h>
3#include <linux/init.h>
4#include <asm/sclp.h>
5#include <asm/sections.h>
6#include <asm/mem_detect.h>
7#include <asm/sparsemem.h>
8#include "compressed/decompressor.h"
9#include "boot.h"
10
11unsigned long __bootdata(max_physmem_end);
12struct mem_detect_info __bootdata(mem_detect);
13
14/* up to 256 storage elements, 1020 subincrements each */
15#define ENTRIES_EXTENDED_MAX \
16 (256 * (1020 / 2) * sizeof(struct mem_detect_block))
17
18/*
19 * To avoid corrupting old kernel memory during dump, find lowest memory
20 * chunk possible either right after the kernel end (decompressed kernel) or
21 * after initrd (if it is present and there is no hole between the kernel end
22 * and initrd)
23 */
24static void *mem_detect_alloc_extended(void)
25{
26 unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
27
28 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
29 INITRD_START < offset + ENTRIES_EXTENDED_MAX)
30 offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
31
32 return (void *)offset;
33}
34
35static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
36{
37 if (n < MEM_INLINED_ENTRIES)
38 return &mem_detect.entries[n];
39 if (unlikely(!mem_detect.entries_extended))
40 mem_detect.entries_extended = mem_detect_alloc_extended();
41 return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
42}
43
44/*
45 * sequential calls to add_mem_detect_block with adjacent memory areas
46 * are merged together into single memory block.
47 */
48void add_mem_detect_block(u64 start, u64 end)
49{
50 struct mem_detect_block *block;
51
52 if (mem_detect.count) {
53 block = __get_mem_detect_block_ptr(mem_detect.count - 1);
54 if (block->end == start) {
55 block->end = end;
56 return;
57 }
58 }
59
60 block = __get_mem_detect_block_ptr(mem_detect.count);
61 block->start = start;
62 block->end = end;
63 mem_detect.count++;
64}
65
/*
 * Issue DIAGNOSE 0x260 subcode 0x10 (storage configuration) with the
 * buffer described by rx1/rx2. The operands are pinned to registers
 * r2-r4 via local register asm variables as the diag interface requires
 * specific registers. A temporary program-check new PSW pointing at
 * label "1:" is installed so an unsupported diag faults back gracefully;
 * the old PSW is restored afterwards from C code. Returns the value left
 * in ry on success (used by the caller as the number of filled extents —
 * presumably; confirm against DIAG 0x260 documentation), or -1 on failure.
 */
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	register unsigned long _rx1 asm("2") = rx1;
	register unsigned long _rx2 asm("3") = rx2;
	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
	int rc = -1; /* fail */
	unsigned long reg1, reg2;
	psw_t old = S390_lowcore.program_new_psw;

	asm volatile(
		/* build a program-check new PSW: current mask, address = 1f */
		"	epsw	%0,%1\n"
		"	st	%0,%[psw_pgm]\n"
		"	st	%1,%[psw_pgm]+4\n"
		"	larl	%0,1f\n"
		"	stg	%0,%[psw_pgm]+8\n"
		"	diag	%[rx],%[ry],0x260\n"
		/* only reached without a program check: rc = condition code */
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: "=&d" (reg1), "=&a" (reg2),
		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
		  [rc] "+&d" (rc), [ry] "+d" (_ry)
		: [rx] "d" (_rx1), "d" (_rx2)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc == 0 ? _ry : -1;
}
93
94static int diag260(void)
95{
96 int rc, i;
97
98 struct {
99 unsigned long start;
100 unsigned long end;
101 } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
102
103 memset(storage_extents, 0, sizeof(storage_extents));
104 rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
105 if (rc == -1)
106 return -1;
107
108 for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
109 add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
110 return 0;
111}
112
/*
 * Probe whether the page at addr is accessible using TEST PROTECTION.
 * The program-check new PSW mask is set up from C (__extract_psw()) and
 * only the PSW address is patched inside the asm to point at label "1:",
 * so an addressing exception (address beyond installed memory) skips the
 * condition-code extraction and leaves rc at -EFAULT. Otherwise rc
 * becomes the tprot condition code (0..3). The saved PSW is restored
 * before returning.
 */
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		/* only reached without a program check */
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}
135
136static void search_mem_end(void)
137{
138 unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
139 unsigned long offset = 0;
140 unsigned long pivot;
141
142 while (range > 1) {
143 range >>= 1;
144 pivot = offset + range;
145 if (!tprot(pivot << 20))
146 offset = pivot;
147 }
148
149 add_mem_detect_block(0, (offset + 1) << 20);
150}
151
152void detect_memory(void)
153{
154 sclp_early_get_memsize(&max_physmem_end);
155
156 if (!sclp_early_read_storage_info()) {
157 mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
158 return;
159 }
160
161 if (!diag260()) {
162 mem_detect.info_source = MEM_DETECT_DIAG260;
163 return;
164 }
165
166 if (max_physmem_end) {
167 add_mem_detect_block(0, max_physmem_end);
168 mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
169 return;
170 }
171
172 search_mem_end();
173 mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
174 max_physmem_end = get_mem_detect_end();
175}