NOTE: scraped listing — appears to be arch/s390/boot/kaslr.c from the Linux kernel.
Two versions of the file follow. First: kernel v6.13.7.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright IBM Corp. 2019
  4 */
  5#include <linux/pgtable.h>
  6#include <asm/physmem_info.h>
  7#include <asm/cpacf.h>
  8#include <asm/timex.h>
  9#include <asm/sclp.h>
 10#include <asm/kasan.h>
 11#include "decompressor.h"
 12#include "boot.h"
 13
 14#define PRNG_MODE_TDES	 1
 15#define PRNG_MODE_SHA512 2
 16#define PRNG_MODE_TRNG	 3
 17
/*
 * Parameter block handed to the CPACF PRNO instruction (SHA512 DRNG
 * seed/generate, see get_random()). The layout is architected by the
 * hardware — do not reorder or resize fields.
 */
struct prno_parm {
	u32 res;		/* presumably reserved — TODO confirm against PoP */
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];		/* presumably DRBG internal state V — confirm */
	u8  C[112];		/* presumably DRBG internal state C — confirm */
};
 25
/*
 * State for the TDES pseudo random number generator driven via the
 * CPACF KMC instruction (see the PRNG_MODE_TDES path in get_random()).
 */
struct prng_parm {
	u8  parm_block[32];	/* passed to cpacf_kmc() as the parameter block */
	u32 reseed_counter;
	u64 byte_counter;
};
 31
 32static int check_prng(void)
 33{
 34	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
 35		boot_printk("KASLR disabled: CPU has no PRNG\n");
 36		return 0;
 37	}
 38	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
 39		return PRNG_MODE_TRNG;
 40	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
 41		return PRNG_MODE_SHA512;
 42	else
 43		return PRNG_MODE_TDES;
 44}
 45
 46int get_random(unsigned long limit, unsigned long *value)
 47{
 48	struct prng_parm prng = {
 49		/* initial parameter block for tdes mode, copied from libica */
 50		.parm_block = {
 51			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
 52			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
 53			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
 54			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
 55		},
 56	};
 57	unsigned long seed, random;
 58	struct prno_parm prno;
 59	__u64 entropy[4];
 60	int mode, i;
 61
 62	mode = check_prng();
 63	seed = get_tod_clock_fast();
 64	switch (mode) {
 65	case PRNG_MODE_TRNG:
 66		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
 67		break;
 68	case PRNG_MODE_SHA512:
 69		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
 70			   (u8 *) &seed, sizeof(seed));
 71		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
 72			   sizeof(random), NULL, 0);
 73		break;
 74	case PRNG_MODE_TDES:
 75		/* add entropy */
 76		*(unsigned long *) prng.parm_block ^= seed;
 77		for (i = 0; i < 16; i++) {
 78			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
 79				  (u8 *) entropy, (u8 *) entropy,
 80				  sizeof(entropy));
 81			memcpy(prng.parm_block, entropy, sizeof(entropy));
 82		}
 83		random = seed;
 84		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
 85			  (u8 *) &random, sizeof(random));
 86		break;
 87	default:
 88		return -1;
 89	}
 90	*value = random % limit;
 91	return 0;
 92}
 93
 94static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 95{
 96	struct reserved_range tmp;
 97	int i, j;
 98
 99	for (i = 1; i < size; i++) {
100		tmp = res[i];
101		for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
102			res[j + 1] = res[j];
103		res[j + 1] = tmp;
 
 
 
 
 
104	}
 
 
105}
106
107static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
108					     unsigned long _min, unsigned long _max,
109					     struct reserved_range *res, size_t res_count,
110					     bool pos_count, unsigned long find_pos)
111{
112	unsigned long start, end, tmp_end, range_pos, pos = 0;
113	struct reserved_range *res_end = res + res_count;
114	struct reserved_range *skip_res;
115	int i;
116
117	align = max(align, 8UL);
118	_min = round_up(_min, align);
119	for_each_physmem_usable_range(i, &start, &end) {
120		if (_min >= end)
121			continue;
122		start = round_up(start, align);
123		if (start >= _max)
124			break;
125		start = max(_min, start);
126		end = min(_max, end);
127
128		while (start + size <= end) {
129			/* skip reserved ranges below the start */
130			while (res && res->end <= start) {
131				res++;
132				if (res >= res_end)
133					res = NULL;
134			}
135			skip_res = NULL;
136			tmp_end = end;
137			/* has intersecting reserved range */
138			if (res && res->start < end) {
139				skip_res = res;
140				tmp_end = res->start;
141			}
142			if (start + size <= tmp_end) {
143				range_pos = (tmp_end - start - size) / align + 1;
144				if (pos_count) {
145					pos += range_pos;
146				} else {
147					if (range_pos >= find_pos)
148						return start + (find_pos - 1) * align;
149					find_pos -= range_pos;
150				}
151			}
152			if (!skip_res)
153				break;
154			start = round_up(skip_res->end, align);
155		}
156	}
157
158	return pos_count ? pos : 0;
159}
160
161/*
162 * Two types of decompressor memory allocations/reserves are considered
163 * differently.
164 *
165 * "Static" or "single" allocations are done via physmem_alloc_range() and
166 * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
167 * type of "static" allocation can only have one allocation per type and
168 * cannot have chains.
169 *
170 * On the other hand, "dynamic" or "repetitive" allocations are done via
171 * physmem_alloc_top_down(). These allocations are tightly packed together
172 * top down from the end of online memory. physmem_alloc_pos represents
173 * current position where those allocations start.
174 *
175 * Functions randomize_within_range() and iterate_valid_positions()
176 * only consider "dynamic" allocations by never looking above
177 * physmem_alloc_pos. "Static" allocations, however, are explicitly
178 * considered by checking the "res" (reserves) array. The first
179 * reserved_range of a "dynamic" allocation may also be checked along the
180 * way, but it will always be above the maximum value anyway.
181 */
182unsigned long randomize_within_range(unsigned long size, unsigned long align,
183				     unsigned long min, unsigned long max)
184{
185	struct reserved_range res[RR_MAX];
186	unsigned long max_pos, pos;
 
 
 
 
187
188	memcpy(res, physmem_info.reserved, sizeof(res));
189	sort_reserved_ranges(res, ARRAY_SIZE(res));
190	max = min(max, get_physmem_alloc_pos());
 
 
 
 
 
 
 
 
 
191
192	max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
193	if (!max_pos)
194		return 0;
195	if (get_random(max_pos, &pos))
 
 
 
 
 
 
 
 
196		return 0;
197	return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
198}
Second version of the same file follows: kernel v5.14.15.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright IBM Corp. 2019
  4 */
  5#include <linux/pgtable.h>
  6#include <asm/mem_detect.h>
  7#include <asm/cpacf.h>
  8#include <asm/timex.h>
  9#include <asm/sclp.h>
 10#include <asm/kasan.h>
 11#include "compressed/decompressor.h"
 12#include "boot.h"
 13
 14#define PRNG_MODE_TDES	 1
 15#define PRNG_MODE_SHA512 2
 16#define PRNG_MODE_TRNG	 3
 17
/*
 * Parameter block handed to the CPACF PRNO instruction (SHA512 DRNG
 * seed/generate, see get_random()). The layout is architected by the
 * hardware — do not reorder or resize fields.
 */
struct prno_parm {
	u32 res;		/* presumably reserved — TODO confirm against PoP */
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];		/* presumably DRBG internal state V — confirm */
	u8  C[112];		/* presumably DRBG internal state C — confirm */
};
 25
/*
 * State for the TDES pseudo random number generator driven via the
 * CPACF KMC instruction (see the PRNG_MODE_TDES path in get_random()).
 */
struct prng_parm {
	u8  parm_block[32];	/* passed to cpacf_kmc() as the parameter block */
	u32 reseed_counter;
	u64 byte_counter;
};
 31
 32static int check_prng(void)
 33{
 34	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
 35		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
 36		return 0;
 37	}
 38	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
 39		return PRNG_MODE_TRNG;
 40	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
 41		return PRNG_MODE_SHA512;
 42	else
 43		return PRNG_MODE_TDES;
 44}
 45
 46static int get_random(unsigned long limit, unsigned long *value)
 47{
 48	struct prng_parm prng = {
 49		/* initial parameter block for tdes mode, copied from libica */
 50		.parm_block = {
 51			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
 52			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
 53			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
 54			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
 55		},
 56	};
 57	unsigned long seed, random;
 58	struct prno_parm prno;
 59	__u64 entropy[4];
 60	int mode, i;
 61
 62	mode = check_prng();
 63	seed = get_tod_clock_fast();
 64	switch (mode) {
 65	case PRNG_MODE_TRNG:
 66		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
 67		break;
 68	case PRNG_MODE_SHA512:
 69		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
 70			   (u8 *) &seed, sizeof(seed));
 71		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
 72			   sizeof(random), NULL, 0);
 73		break;
 74	case PRNG_MODE_TDES:
 75		/* add entropy */
 76		*(unsigned long *) prng.parm_block ^= seed;
 77		for (i = 0; i < 16; i++) {
 78			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
 79				  (u8 *) entropy, (u8 *) entropy,
 80				  sizeof(entropy));
 81			memcpy(prng.parm_block, entropy, sizeof(entropy));
 82		}
 83		random = seed;
 84		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
 85			  (u8 *) &random, sizeof(random));
 86		break;
 87	default:
 88		return -1;
 89	}
 90	*value = random % limit;
 91	return 0;
 92}
 93
 94/*
 95 * To randomize kernel base address we have to consider several facts:
 96 * 1. physical online memory might not be continuous and have holes. mem_detect
 97 *    info contains list of online memory ranges we should consider.
 98 * 2. we have several memory regions which are occupied and we should not
 99 *    overlap and destroy them. Currently safe_addr tells us the border below
100 *    which all those occupied regions are. We are safe to use anything above
101 *    safe_addr.
102 * 3. the upper limit might apply as well, even if memory above that limit is
103 *    online. Currently those limitations are:
104 *    3.1. Limit set by "mem=" kernel command line option
105 *    3.2. memory reserved at the end for kasan initialization.
106 * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
107 *    Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
108 *    (16 pages when the kernel is built with kasan enabled)
109 * Assumptions:
110 * 1. kernel size (including .bss size) and upper memory limit are page aligned.
111 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
112 *    aligned (in practice memory configurations granularity on z/VM and LPAR
113 *    is 1mb).
114 *
115 * To guarantee uniform distribution of kernel base address among all suitable
116 * addresses we generate random value just once. For that we need to build a
117 * continuous range in which every value would be suitable. We can build this
118 * range by simply counting all suitable addresses (let's call them positions)
119 * which would be valid as kernel base address. To count positions we iterate
120 * over online memory ranges. For each range which is big enough for the
121 * kernel image we count all suitable addresses we can put the kernel image at
122 * that is
123 * (end - start - kernel_size) / THREAD_SIZE + 1
124 * Two functions count_valid_kernel_positions and position_to_address help
125 * to count positions in memory range given and then convert position back
126 * to address.
127 */
128static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
129						  unsigned long _min,
130						  unsigned long _max)
131{
132	unsigned long start, end, pos = 0;
133	int i;
134
135	for_each_mem_detect_block(i, &start, &end) {
136		if (_min >= end)
137			continue;
138		if (start >= _max)
139			break;
140		start = max(_min, start);
141		end = min(_max, end);
142		if (end - start < kernel_size)
143			continue;
144		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
145	}
146
147	return pos;
148}
149
150static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
151				 unsigned long _min, unsigned long _max)
 
 
152{
153	unsigned long start, end;
 
 
154	int i;
155
156	for_each_mem_detect_block(i, &start, &end) {
 
 
157		if (_min >= end)
158			continue;
 
159		if (start >= _max)
160			break;
161		start = max(_min, start);
162		end = min(_max, end);
163		if (end - start < kernel_size)
164			continue;
165		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
166			return start + (pos - 1) * THREAD_SIZE;
167		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168	}
169
170	return 0;
171}
172
173unsigned long get_random_base(unsigned long safe_addr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174{
175	unsigned long memory_limit = get_mem_detect_end();
176	unsigned long base_pos, max_pos, kernel_size;
177	unsigned long kasan_needs;
178	int i;
179
180	memory_limit = min(memory_limit, ident_map_size);
181
182	/*
183	 * Avoid putting kernel in the end of physical memory
184	 * which kasan will use for shadow memory and early pgtable
185	 * mapping allocations.
186	 */
187	memory_limit -= kasan_estimate_memory_needs(memory_limit);
188
189	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
190		if (safe_addr < INITRD_START + INITRD_SIZE)
191			safe_addr = INITRD_START + INITRD_SIZE;
192	}
193	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
194
195	kernel_size = vmlinux.image_size + vmlinux.bss_size;
196	if (safe_addr + kernel_size > memory_limit)
197		return 0;
198
199	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
200	if (!max_pos) {
201		sclp_early_printk("KASLR disabled: not enough memory\n");
202		return 0;
203	}
204
205	/* we need a value in the range [1, base_pos] inclusive */
206	if (get_random(max_pos, &base_pos))
207		return 0;
208	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
209}