Linux kernel source: arch/s390/boot — KASLR base-address randomization.
Two versions of the file follow, labelled v6.13.7 and v5.9.

v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright IBM Corp. 2019
  4 */
  5#include <linux/pgtable.h>
  6#include <asm/physmem_info.h>
  7#include <asm/cpacf.h>
  8#include <asm/timex.h>
  9#include <asm/sclp.h>
 10#include <asm/kasan.h>
 11#include "decompressor.h"
 12#include "boot.h"
 13
 14#define PRNG_MODE_TDES	 1
 15#define PRNG_MODE_SHA512 2
 16#define PRNG_MODE_TRNG	 3
 17
/*
 * Parameter block for the CPACF PRNO (SHA-512 DRNG) instruction.
 * Field layout follows the hardware-defined format — do not reorder.
 */
struct prno_parm {
	u32 res;		/* reserved */
	u32 reseed_counter;	/* number of reseeds performed */
	u64 stream_bytes;	/* bytes generated since last (re)seed */
	u8  V[112];		/* internal DRNG state — presumably V/C per the
				 * PRNO-SHA-512-DRNG spec; confirm against PoP */
	u8  C[112];
};
 25
/*
 * Parameter block for the CPACF KMC-PRNG (TDES-based) function.
 * Field layout follows the hardware-defined format — do not reorder.
 */
struct prng_parm {
	u8  parm_block[32];	/* TDES PRNG internal state / parameter block */
	u32 reseed_counter;
	u64 byte_counter;
};
 31
 32static int check_prng(void)
 33{
 34	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
 35		boot_printk("KASLR disabled: CPU has no PRNG\n");
 36		return 0;
 37	}
 38	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
 39		return PRNG_MODE_TRNG;
 40	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
 41		return PRNG_MODE_SHA512;
 42	else
 43		return PRNG_MODE_TDES;
 44}
 45
/*
 * get_random() - generate a random value in the range [0, limit).
 * @limit: exclusive upper bound; must be non-zero or the final modulo
 *         divides by zero (callers check the count before calling).
 * @value: where the generated value is stored on success.
 *
 * Uses the best CPACF facility reported by check_prng() (TRNG >
 * SHA-512 DRNG > TDES PRNG); the DRNG/PRNG modes are seeded from the
 * TOD clock.
 *
 * Returns 0 on success, -1 when no usable PRNG facility exists.
 *
 * NOTE(review): "random % limit" carries a slight modulo bias unless
 * limit divides 2^64 — presumably acceptable for KASLR placement.
 */
int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();	/* TOD clock as seed material */
	switch (mode) {
	case PRNG_MODE_TRNG:
		/* hardware true RNG: fetch random bytes directly */
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		/* seed the SHA-512 DRNG from the TOD clock, then generate */
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		/* 16 feedback rounds to mix the KMC-PRNG parameter block */
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		/* encrypt the seed itself with the mixed state */
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}
 93
 94static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
 95{
 96	struct reserved_range tmp;
 97	int i, j;
 
 
 
 98
 99	for (i = 1; i < size; i++) {
100		tmp = res[i];
101		for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
102			res[j + 1] = res[j];
103		res[j + 1] = tmp;
104	}
105}
106
/*
 * Walk all usable physical memory ranges and enumerate the aligned
 * positions where an allocation of @size bytes fits inside
 * [@_min, @_max) without intersecting any reserved range.
 *
 * @res:       reserved ranges, sorted by ascending start address
 *             (see sort_reserved_ranges())
 * @res_count: number of entries in @res
 * @pos_count: selects the mode of operation:
 *             true  - count valid positions; @find_pos is ignored and
 *                     the total count is returned (0 if none)
 *             false - return the address of the @find_pos'th (1-based)
 *                     valid position, or 0 if not found
 */
static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
					     unsigned long _min, unsigned long _max,
					     struct reserved_range *res, size_t res_count,
					     bool pos_count, unsigned long find_pos)
{
	unsigned long start, end, tmp_end, range_pos, pos = 0;
	struct reserved_range *res_end = res + res_count;
	struct reserved_range *skip_res;
	int i;

	/* candidate positions are at least 8-byte aligned */
	align = max(align, 8UL);
	_min = round_up(_min, align);
	for_each_physmem_usable_range(i, &start, &end) {
		if (_min >= end)
			continue;
		start = round_up(start, align);
		if (start >= _max)
			break;
		/* clamp the usable range to [_min, _max) */
		start = max(_min, start);
		end = min(_max, end);

		while (start + size <= end) {
			/* skip reserved ranges below the start */
			while (res && res->end <= start) {
				res++;
				if (res >= res_end)
					res = NULL;	/* no reserves left */
			}
			skip_res = NULL;
			tmp_end = end;
			/* has intersecting reserved range */
			if (res && res->start < end) {
				skip_res = res;
				tmp_end = res->start;
			}
			if (start + size <= tmp_end) {
				/* aligned positions in [start, tmp_end - size] */
				range_pos = (tmp_end - start - size) / align + 1;
				if (pos_count) {
					pos += range_pos;
				} else {
					if (range_pos >= find_pos)
						return start + (find_pos - 1) * align;
					find_pos -= range_pos;
				}
			}
			if (!skip_res)
				break;
			/* resume scanning after the reserved range we hit */
			start = round_up(skip_res->end, align);
		}
	}

	return pos_count ? pos : 0;
}
160
161/*
162 * Two types of decompressor memory allocations/reserves are considered
163 * differently.
164 *
165 * "Static" or "single" allocations are done via physmem_alloc_range() and
166 * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
167 * type of "static" allocation can only have one allocation per type and
168 * cannot have chains.
169 *
170 * On the other hand, "dynamic" or "repetitive" allocations are done via
171 * physmem_alloc_top_down(). These allocations are tightly packed together
172 * top down from the end of online memory. physmem_alloc_pos represents
173 * current position where those allocations start.
174 *
175 * Functions randomize_within_range() and iterate_valid_positions()
176 * only consider "dynamic" allocations by never looking above
177 * physmem_alloc_pos. "Static" allocations, however, are explicitly
178 * considered by checking the "res" (reserves) array. The first
179 * reserved_range of a "dynamic" allocation may also be checked along the
180 * way, but it will always be above the maximum value anyway.
181 */
182unsigned long randomize_within_range(unsigned long size, unsigned long align,
183				     unsigned long min, unsigned long max)
184{
185	struct reserved_range res[RR_MAX];
186	unsigned long max_pos, pos;
187
188	memcpy(res, physmem_info.reserved, sizeof(res));
189	sort_reserved_ranges(res, ARRAY_SIZE(res));
190	max = min(max, get_physmem_alloc_pos());
191
192	max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
193	if (!max_pos)
194		return 0;
195	if (get_random(max_pos, &pos))
 
 
 
196		return 0;
197	return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198}
v5.9 — older version of the same file (arch/s390/boot/kaslr.c):
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright IBM Corp. 2019
  4 */
  5#include <linux/pgtable.h>
  6#include <asm/mem_detect.h>
  7#include <asm/cpacf.h>
  8#include <asm/timex.h>
  9#include <asm/sclp.h>
 10#include "compressed/decompressor.h"
 
 11#include "boot.h"
 12
 13#define PRNG_MODE_TDES	 1
 14#define PRNG_MODE_SHA512 2
 15#define PRNG_MODE_TRNG	 3
 16
/*
 * Parameter block for the CPACF PRNO (SHA-512 DRNG) instruction.
 * Field layout follows the hardware-defined format — do not reorder.
 */
struct prno_parm {
	u32 res;		/* reserved */
	u32 reseed_counter;	/* number of reseeds performed */
	u64 stream_bytes;	/* bytes generated since last (re)seed */
	u8  V[112];		/* internal DRNG state — presumably V/C per the
				 * PRNO-SHA-512-DRNG spec; confirm against PoP */
	u8  C[112];
};
 24
/*
 * Parameter block for the CPACF KMC-PRNG (TDES-based) function.
 * Field layout follows the hardware-defined format — do not reorder.
 */
struct prng_parm {
	u8  parm_block[32];	/* TDES PRNG internal state / parameter block */
	u32 reseed_counter;
	u64 byte_counter;
};
 30
 31static int check_prng(void)
 32{
 33	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
 34		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
 35		return 0;
 36	}
 37	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
 38		return PRNG_MODE_TRNG;
 39	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
 40		return PRNG_MODE_SHA512;
 41	else
 42		return PRNG_MODE_TDES;
 43}
 44
/*
 * Generate a random value in the range [0, limit) using the best
 * CPACF facility reported by check_prng() (TRNG > SHA-512 DRNG >
 * TDES PRNG); the DRNG/PRNG modes are seeded from the TOD clock.
 *
 * @limit must be non-zero or the final modulo divides by zero (the
 * caller only passes a non-zero block_sum).
 *
 * When no PRNG facility is available the result is 0, which the
 * caller interprets as "KASLR disabled".
 */
static unsigned long get_random(unsigned long limit)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();	/* TOD clock as seed material */
	switch (mode) {
	case PRNG_MODE_TRNG:
		/* hardware true RNG: fetch random bytes directly */
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		/* seed the SHA-512 DRNG from the TOD clock, then generate */
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		/* 16 feedback rounds to mix the KMC-PRNG parameter block */
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		/* encrypt the seed itself with the mixed state */
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		random = 0;	/* no PRNG: result 0 disables KASLR */
	}
	return random % limit;
}
 91
/*
 * Pick a random, THREAD_SIZE-aligned base address for the decompressed
 * kernel image.
 *
 * @safe_addr: lowest address that may be used; bumped past the initrd
 *             when one is loaded above it.
 *
 * Returns the chosen base address, or 0 to run without KASLR (no PRNG
 * facility, not enough memory, or the PRNG happened to return 0).
 */
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = memory_end_set ? memory_end : 0;
	unsigned long base, start, end, kernel_size;
	unsigned long block_sum, offset;
	unsigned long kasan_needs;
	int i;

	/* never place the kernel inside the initrd image */
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	if ((IS_ENABLED(CONFIG_KASAN))) {
		/*
		 * Estimate kasan memory requirements, which it will reserve
		 * at the very end of available physical memory. To estimate
		 * that, we take into account that kasan would require
		 * 1/8 of available physical memory (for shadow memory) +
		 * creating page tables for the whole memory + shadow memory
		 * region (1 + 1/8). To keep page tables estimates simple take
		 * the double of combined ptes size.
		 */
		memory_limit = get_mem_detect_end();
		if (memory_end_set && memory_limit > memory_end)
			memory_limit = memory_end;

		/* for shadow memory */
		kasan_needs = memory_limit / 8;
		/* for paging structures */
		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
		memory_limit -= kasan_needs;
	}

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	/* pass 1: sum up the candidate start-byte space over all blocks */
	block_sum = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_limit) {
			/* clip detected blocks to the memory limit */
			if (start >= memory_limit)
				break;
			if (end > memory_limit)
				end = memory_limit;
		}
		if (end - start < kernel_size)
			continue;	/* block too small for the kernel */
		block_sum += end - start - kernel_size;
	}
	if (!block_sum) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	/* draw a random byte offset into the candidate space */
	base = get_random(block_sum);
	if (base == 0)	/* 0 means "no PRNG facility available" */
		return 0;
	/*
	 * NOTE(review): clamping to safe_addr skews the distribution
	 * toward safe_addr when the random offset falls below it.
	 */
	if (base < safe_addr)
		base = safe_addr;
	/* pass 2: translate the offset back into a physical address */
	block_sum = offset = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_limit) {
			if (start >= memory_limit)
				break;
			if (end > memory_limit)
				end = memory_limit;
		}
		if (end - start < kernel_size)
			continue;
		block_sum += end - start - kernel_size;
		if (base <= block_sum) {
			/* offset falls in this block: compute the address */
			base = start + base - offset;
			base = ALIGN_DOWN(base, THREAD_SIZE);
			break;
		}
		offset = block_sum;	/* cumulative bytes before next block */
	}
	return base;
}