v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/init.h>
  3#include <linux/async.h>
  4#include <linux/fs.h>
  5#include <linux/slab.h>
  6#include <linux/types.h>
  7#include <linux/fcntl.h>
  8#include <linux/delay.h>
  9#include <linux/string.h>
 10#include <linux/dirent.h>
 11#include <linux/syscalls.h>
 12#include <linux/utime.h>
 13#include <linux/file.h>
 14#include <linux/kstrtox.h>
 15#include <linux/memblock.h>
 16#include <linux/mm.h>
 17#include <linux/namei.h>
 18#include <linux/init_syscalls.h>
 19#include <linux/umh.h>
 20#include <linux/security.h>
 21
 22#include "do_mounts.h"
 23
 24static __initdata bool csum_present;
 25static __initdata u32 io_csum;
 26
 27static ssize_t __init xwrite(struct file *file, const unsigned char *p,
 28		size_t count, loff_t *pos)
 29{
 30	ssize_t out = 0;
 31
 32	/* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
 33	while (count) {
 34		ssize_t rv = kernel_write(file, p, count, pos);
 35
 36		if (rv < 0) {
 37			if (rv == -EINTR || rv == -EAGAIN)
 38				continue;
 39			return out ? out : rv;
 40		} else if (rv == 0)
 41			break;
 42
 43		if (csum_present) {
 44			ssize_t i;
 45
 46			for (i = 0; i < rv; i++)
 47				io_csum += p[i];
 48		}
 49
 50		p += rv;
 51		out += rv;
 52		count -= rv;
 53	}
 54
 55	return out;
 56}
 57
 58static __initdata char *message;
 59static void __init error(char *x)
 60{
 61	if (!message)
 62		message = x;
 63}
 64
 65#define panic_show_mem(fmt, ...) \
 66	({ show_mem(); panic(fmt, ##__VA_ARGS__); })
 67
 68/* link hash */
 69
 70#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
 71
 72static __initdata struct hash {
 73	int ino, minor, major;
 74	umode_t mode;
 75	struct hash *next;
 76	char name[N_ALIGN(PATH_MAX)];
 77} *head[32];
 78
 79static inline int hash(int major, int minor, int ino)
 80{
 81	unsigned long tmp = ino + minor + (major << 3);
 82	tmp += tmp >> 5;
 83	return tmp & 31;
 84}
 85
 86static char __init *find_link(int major, int minor, int ino,
 87			      umode_t mode, char *name)
 88{
 89	struct hash **p, *q;
 90	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
 91		if ((*p)->ino != ino)
 92			continue;
 93		if ((*p)->minor != minor)
 94			continue;
 95		if ((*p)->major != major)
 96			continue;
 97		if (((*p)->mode ^ mode) & S_IFMT)
 98			continue;
 99		return (*p)->name;
100	}
101	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
102	if (!q)
103		panic_show_mem("can't allocate link hash entry");
104	q->major = major;
105	q->minor = minor;
106	q->ino = ino;
107	q->mode = mode;
108	strcpy(q->name, name);
109	q->next = NULL;
110	*p = q;
111	return NULL;
112}
113
114static void __init free_hash(void)
115{
116	struct hash **p, *q;
117	for (p = head; p < head + 32; p++) {
118		while (*p) {
119			q = *p;
120			*p = q->next;
121			kfree(q);
122		}
123	}
124}
125
126#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
127static void __init do_utime(char *filename, time64_t mtime)
128{
129	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
130	init_utimes(filename, t);
131}
132
133static void __init do_utime_path(const struct path *path, time64_t mtime)
134{
135	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
136	vfs_utimes(path, t);
137}
138
139static __initdata LIST_HEAD(dir_list);
140struct dir_entry {
141	struct list_head list;
142	time64_t mtime;
143	char name[];
144};
145
146static void __init dir_add(const char *name, time64_t mtime)
147{
148	size_t nlen = strlen(name) + 1;
149	struct dir_entry *de;
150
151	de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
152	if (!de)
153		panic_show_mem("can't allocate dir_entry buffer");
154	INIT_LIST_HEAD(&de->list);
155	strscpy(de->name, name, nlen);
156	de->mtime = mtime;
157	list_add(&de->list, &dir_list);
158}
159
160static void __init dir_utime(void)
161{
162	struct dir_entry *de, *tmp;
163	list_for_each_entry_safe(de, tmp, &dir_list, list) {
164		list_del(&de->list);
165		do_utime(de->name, de->mtime);
166		kfree(de);
167	}
168}
169#else
170static void __init do_utime(char *filename, time64_t mtime) {}
171static void __init do_utime_path(const struct path *path, time64_t mtime) {}
172static void __init dir_add(const char *name, time64_t mtime) {}
173static void __init dir_utime(void) {}
174#endif
175
176static __initdata time64_t mtime;
177
178/* cpio header parsing */
179
180static __initdata unsigned long ino, major, minor, nlink;
181static __initdata umode_t mode;
182static __initdata unsigned long body_len, name_len;
183static __initdata uid_t uid;
184static __initdata gid_t gid;
185static __initdata unsigned rdev;
186static __initdata u32 hdr_csum;
187
188static void __init parse_header(char *s)
189{
190	unsigned long parsed[13];
191	char buf[9];
192	int i;
193
194	buf[8] = '\0';
195	for (i = 0, s += 6; i < 13; i++, s += 8) {
196		memcpy(buf, s, 8);
197		parsed[i] = simple_strtoul(buf, NULL, 16);
198	}
199	ino = parsed[0];
200	mode = parsed[1];
201	uid = parsed[2];
202	gid = parsed[3];
203	nlink = parsed[4];
204	mtime = parsed[5]; /* breaks in y2106 */
205	body_len = parsed[6];
206	major = parsed[7];
207	minor = parsed[8];
208	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
209	name_len = parsed[11];
210	hdr_csum = parsed[12];
211}
212
213/* FSM */
214
215static __initdata enum state {
216	Start,
217	Collect,
218	GotHeader,
219	SkipIt,
220	GotName,
221	CopyFile,
222	GotSymlink,
223	Reset
224} state, next_state;
225
226static __initdata char *victim;
227static unsigned long byte_count __initdata;
228static __initdata loff_t this_header, next_header;
229
230static inline void __init eat(unsigned n)
231{
232	victim += n;
233	this_header += n;
234	byte_count -= n;
235}
236
237static __initdata char *collected;
238static long remains __initdata;
239static __initdata char *collect;
240
241static void __init read_into(char *buf, unsigned size, enum state next)
242{
243	if (byte_count >= size) {
244		collected = victim;
245		eat(size);
246		state = next;
247	} else {
248		collect = collected = buf;
249		remains = size;
250		next_state = next;
251		state = Collect;
252	}
253}
254
255static __initdata char *header_buf, *symlink_buf, *name_buf;
256
257static int __init do_start(void)
258{
259	read_into(header_buf, 110, GotHeader);
260	return 0;
261}
262
263static int __init do_collect(void)
264{
265	unsigned long n = remains;
266	if (byte_count < n)
267		n = byte_count;
268	memcpy(collect, victim, n);
269	eat(n);
270	collect += n;
271	if ((remains -= n) != 0)
272		return 1;
273	state = next_state;
274	return 0;
275}
276
277static int __init do_header(void)
278{
279	if (!memcmp(collected, "070701", 6)) {
280		csum_present = false;
281	} else if (!memcmp(collected, "070702", 6)) {
282		csum_present = true;
283	} else {
284		if (memcmp(collected, "070707", 6) == 0)
285			error("incorrect cpio method used: use -H newc option");
286		else
287			error("no cpio magic");
288		return 1;
289	}
290	parse_header(collected);
291	next_header = this_header + N_ALIGN(name_len) + body_len;
292	next_header = (next_header + 3) & ~3;
293	state = SkipIt;
294	if (name_len <= 0 || name_len > PATH_MAX)
295		return 0;
296	if (S_ISLNK(mode)) {
297		if (body_len > PATH_MAX)
298			return 0;
299		collect = collected = symlink_buf;
300		remains = N_ALIGN(name_len) + body_len;
301		next_state = GotSymlink;
302		state = Collect;
303		return 0;
304	}
305	if (S_ISREG(mode) || !body_len)
306		read_into(name_buf, N_ALIGN(name_len), GotName);
307	return 0;
308}
309
310static int __init do_skip(void)
311{
312	if (this_header + byte_count < next_header) {
313		eat(byte_count);
314		return 1;
315	} else {
316		eat(next_header - this_header);
317		state = next_state;
318		return 0;
319	}
320}
321
322static int __init do_reset(void)
323{
324	while (byte_count && *victim == '\0')
325		eat(1);
326	if (byte_count && (this_header & 3))
327		error("broken padding");
328	return 1;
329}
330
331static void __init clean_path(char *path, umode_t fmode)
332{
333	struct kstat st;
334
335	if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
336	    (st.mode ^ fmode) & S_IFMT) {
337		if (S_ISDIR(st.mode))
338			init_rmdir(path);
339		else
340			init_unlink(path);
341	}
342}
343
344static int __init maybe_link(void)
345{
346	if (nlink >= 2) {
347		char *old = find_link(major, minor, ino, mode, collected);
348		if (old) {
349			clean_path(collected, 0);
350			return (init_link(old, collected) < 0) ? -1 : 1;
351		}
352	}
353	return 0;
354}
355
356static __initdata struct file *wfile;
357static __initdata loff_t wfile_pos;
358
359static int __init do_name(void)
360{
361	state = SkipIt;
362	next_state = Reset;
363
364	/* name_len > 0 && name_len <= PATH_MAX checked in do_header */
365	if (collected[name_len - 1] != '\0') {
366		pr_err("initramfs name without nulterm: %.*s\n",
367		       (int)name_len, collected);
368		error("malformed archive");
369		return 1;
370	}
371
372	if (strcmp(collected, "TRAILER!!!") == 0) {
373		free_hash();
374		return 0;
375	}
376	clean_path(collected, mode);
377	if (S_ISREG(mode)) {
378		int ml = maybe_link();
379		if (ml >= 0) {
380			int openflags = O_WRONLY|O_CREAT|O_LARGEFILE;
381			if (ml != 1)
382				openflags |= O_TRUNC;
383			wfile = filp_open(collected, openflags, mode);
384			if (IS_ERR(wfile))
385				return 0;
386			wfile_pos = 0;
387			io_csum = 0;
388
389			vfs_fchown(wfile, uid, gid);
390			vfs_fchmod(wfile, mode);
391			if (body_len)
392				vfs_truncate(&wfile->f_path, body_len);
393			state = CopyFile;
394		}
395	} else if (S_ISDIR(mode)) {
396		init_mkdir(collected, mode);
397		init_chown(collected, uid, gid, 0);
398		init_chmod(collected, mode);
399		dir_add(collected, mtime);
400	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
401		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
402		if (maybe_link() == 0) {
403			init_mknod(collected, mode, rdev);
404			init_chown(collected, uid, gid, 0);
405			init_chmod(collected, mode);
406			do_utime(collected, mtime);
407		}
408	}
409	return 0;
410}
411
412static int __init do_copy(void)
413{
414	if (byte_count >= body_len) {
415		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
416			error("write error");
417
418		do_utime_path(&wfile->f_path, mtime);
419		fput(wfile);
420		if (csum_present && io_csum != hdr_csum)
421			error("bad data checksum");
422		eat(body_len);
423		state = SkipIt;
424		return 0;
425	} else {
426		if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
427			error("write error");
428		body_len -= byte_count;
429		eat(byte_count);
430		return 1;
431	}
432}
433
434static int __init do_symlink(void)
435{
436	if (collected[name_len - 1] != '\0') {
437		pr_err("initramfs symlink without nulterm: %.*s\n",
438		       (int)name_len, collected);
439		error("malformed archive");
440		return 1;
441	}
442	collected[N_ALIGN(name_len) + body_len] = '\0';
443	clean_path(collected, 0);
444	init_symlink(collected + N_ALIGN(name_len), collected);
445	init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
446	do_utime(collected, mtime);
447	state = SkipIt;
448	next_state = Reset;
449	return 0;
450}
451
452static __initdata int (*actions[])(void) = {
453	[Start]		= do_start,
454	[Collect]	= do_collect,
455	[GotHeader]	= do_header,
456	[SkipIt]	= do_skip,
457	[GotName]	= do_name,
458	[CopyFile]	= do_copy,
459	[GotSymlink]	= do_symlink,
460	[Reset]		= do_reset,
461};
462
463static long __init write_buffer(char *buf, unsigned long len)
464{
465	byte_count = len;
466	victim = buf;
467
468	while (!actions[state]())
469		;
470	return len - byte_count;
471}
472
473static long __init flush_buffer(void *bufv, unsigned long len)
474{
475	char *buf = bufv;
476	long written;
477	long origLen = len;
478	if (message)
479		return -1;
480	while ((written = write_buffer(buf, len)) < len && !message) {
481		char c = buf[written];
482		if (c == '0') {
483			buf += written;
484			len -= written;
485			state = Start;
486		} else if (c == 0) {
487			buf += written;
488			len -= written;
489			state = Reset;
490		} else
491			error("junk within compressed archive");
492	}
493	return origLen;
494}
495
496static unsigned long my_inptr __initdata; /* index of next byte to be processed in inbuf */
497
498#include <linux/decompress/generic.h>
499
500static char * __init unpack_to_rootfs(char *buf, unsigned long len)
501{
502	long written;
503	decompress_fn decompress;
504	const char *compress_name;
505	static __initdata char msg_buf[64];
506
507	header_buf = kmalloc(110, GFP_KERNEL);
508	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
509	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
510
511	if (!header_buf || !symlink_buf || !name_buf)
512		panic_show_mem("can't allocate buffers");
513
514	state = Start;
515	this_header = 0;
516	message = NULL;
517	while (!message && len) {
518		loff_t saved_offset = this_header;
519		if (*buf == '0' && !(this_header & 3)) {
520			state = Start;
521			written = write_buffer(buf, len);
522			buf += written;
523			len -= written;
524			continue;
525		}
526		if (!*buf) {
527			buf++;
528			len--;
529			this_header++;
530			continue;
531		}
532		this_header = 0;
533		decompress = decompress_method(buf, len, &compress_name);
534		pr_debug("Detected %s compressed data\n", compress_name);
535		if (decompress) {
536			int res = decompress(buf, len, NULL, flush_buffer, NULL,
537				   &my_inptr, error);
538			if (res)
539				error("decompressor failed");
540		} else if (compress_name) {
541			if (!message) {
542				snprintf(msg_buf, sizeof msg_buf,
543					 "compression method %s not configured",
544					 compress_name);
545				message = msg_buf;
546			}
547		} else
548			error("invalid magic at start of compressed archive");
549		if (state != Reset)
550			error("junk at the end of compressed archive");
551		this_header = saved_offset + my_inptr;
552		buf += my_inptr;
553		len -= my_inptr;
554	}
555	dir_utime();
556	kfree(name_buf);
557	kfree(symlink_buf);
558	kfree(header_buf);
559	return message;
560}
561
562static int __initdata do_retain_initrd;
563
564static int __init retain_initrd_param(char *str)
565{
566	if (*str)
567		return 0;
568	do_retain_initrd = 1;
569	return 1;
570}
571__setup("retain_initrd", retain_initrd_param);
572
573#ifdef CONFIG_ARCH_HAS_KEEPINITRD
574static int __init keepinitrd_setup(char *__unused)
575{
576	do_retain_initrd = 1;
577	return 1;
578}
579__setup("keepinitrd", keepinitrd_setup);
580#endif
581
582static bool __initdata initramfs_async = true;
583static int __init initramfs_async_setup(char *str)
584{
585	return kstrtobool(str, &initramfs_async) == 0;
586}
587__setup("initramfs_async=", initramfs_async_setup);
588
589extern char __initramfs_start[];
590extern unsigned long __initramfs_size;
591#include <linux/initrd.h>
592#include <linux/kexec.h>
593
594static BIN_ATTR(initrd, 0440, sysfs_bin_attr_simple_read, NULL, 0);
595
596void __init reserve_initrd_mem(void)
597{
598	phys_addr_t start;
599	unsigned long size;
600
601	/* Ignore the virtual address computed during device tree parsing */
602	initrd_start = initrd_end = 0;
603
604	if (!phys_initrd_size)
605		return;
606	/*
607	 * Round the memory region to page boundaries as per free_initrd_mem()
608	 * This allows us to detect whether the pages overlapping the initrd
609	 * are in use, but more importantly, reserves the entire set of pages
610	 * as we don't want these pages allocated for other purposes.
611	 */
612	start = round_down(phys_initrd_start, PAGE_SIZE);
613	size = phys_initrd_size + (phys_initrd_start - start);
614	size = round_up(size, PAGE_SIZE);
615
616	if (!memblock_is_region_memory(start, size)) {
617		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
618		       (u64)start, size);
619		goto disable;
620	}
621
622	if (memblock_is_region_reserved(start, size)) {
623		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
624		       (u64)start, size);
625		goto disable;
626	}
627
628	memblock_reserve(start, size);
629	/* Now convert initrd to virtual addresses */
630	initrd_start = (unsigned long)__va(phys_initrd_start);
631	initrd_end = initrd_start + phys_initrd_size;
632	initrd_below_start_ok = 1;
633
634	return;
635disable:
636	pr_cont(" - disabling initrd\n");
637	initrd_start = 0;
638	initrd_end = 0;
639}
640
641void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
642{
643#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
644	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
645	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
646
647	memblock_free((void *)aligned_start, aligned_end - aligned_start);
648#endif
649
650	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
651			"initrd");
652}
653
654#ifdef CONFIG_CRASH_RESERVE
655static bool __init kexec_free_initrd(void)
656{
657	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
658	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
659
660	/*
661	 * If the initrd region is overlapped with crashkernel reserved region,
662	 * free only memory that is not part of crashkernel region.
663	 */
664	if (initrd_start >= crashk_end || initrd_end <= crashk_start)
665		return false;
666
667	/*
668	 * Initialize initrd memory region since the kexec boot does not do so.
669	 */
670	memset((void *)initrd_start, 0, initrd_end - initrd_start);
671	if (initrd_start < crashk_start)
672		free_initrd_mem(initrd_start, crashk_start);
673	if (initrd_end > crashk_end)
674		free_initrd_mem(crashk_end, initrd_end);
675	return true;
676}
677#else
678static inline bool kexec_free_initrd(void)
679{
680	return false;
681}
682#endif /* CONFIG_CRASH_RESERVE */
683
684#ifdef CONFIG_BLK_DEV_RAM
685static void __init populate_initrd_image(char *err)
686{
687	ssize_t written;
688	struct file *file;
689	loff_t pos = 0;
690
691	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
692			err);
693	file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
694	if (IS_ERR(file))
695		return;
696
697	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
698			&pos);
699	if (written != initrd_end - initrd_start)
700		pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
701		       written, initrd_end - initrd_start);
702	fput(file);
703}
704#endif /* CONFIG_BLK_DEV_RAM */
705
706static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
707{
708	/* Load the built in initramfs */
709	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
710	if (err)
711		panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */
712
713	if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
714		goto done;
715
716	if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
717		printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
718	else
719		printk(KERN_INFO "Unpacking initramfs...\n");
720
721	err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
722	if (err) {
723#ifdef CONFIG_BLK_DEV_RAM
724		populate_initrd_image(err);
725#else
726		printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
727#endif
728	}
729
730done:
731	security_initramfs_populated();
732
733	/*
734	 * If the initrd region is overlapped with crashkernel reserved region,
735	 * free only memory that is not part of crashkernel region.
736	 */
737	if (!do_retain_initrd && initrd_start && !kexec_free_initrd()) {
738		free_initrd_mem(initrd_start, initrd_end);
739	} else if (do_retain_initrd && initrd_start) {
740		bin_attr_initrd.size = initrd_end - initrd_start;
741		bin_attr_initrd.private = (void *)initrd_start;
742		if (sysfs_create_bin_file(firmware_kobj, &bin_attr_initrd))
743			pr_err("Failed to create initrd sysfs file");
744	}
745	initrd_start = 0;
746	initrd_end = 0;
747
748	init_flush_fput();
749}
750
751static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
752static async_cookie_t initramfs_cookie;
753
754void wait_for_initramfs(void)
755{
756	if (!initramfs_cookie) {
757		/*
758		 * Something before rootfs_initcall wants to access
759		 * the filesystem/initramfs. Probably a bug. Make a
760		 * note, avoid deadlocking the machine, and let the
761		 * caller's access fail as it used to.
762		 */
763		pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n");
764		return;
765	}
766	async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain);
767}
768EXPORT_SYMBOL_GPL(wait_for_initramfs);
769
770static int __init populate_rootfs(void)
771{
772	initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
773						 &initramfs_domain);
774	usermodehelper_enable();
775	if (!initramfs_async)
776		wait_for_initramfs();
777	return 0;
778}
779rootfs_initcall(populate_rootfs);
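The parse_header() routine in the listing above walks a fixed 110-byte "newc" cpio header: a 6-byte magic string followed by thirteen 8-character ASCII-hex fields (6 + 13 * 8 = 110, which is why do_start() calls read_into(header_buf, 110, GotHeader)). The sketch below is illustrative only and is not part of the kernel source; the field names follow the conventional newc/crc cpio documentation. The older v5.14.15 listing that follows parses only the first twelve fields and ignores c_check.

struct cpio_newc_header {
	char c_magic[6];	/* "070701", or "070702" when a data checksum is present */
	char c_ino[8];		/* -> ino */
	char c_mode[8];		/* -> mode */
	char c_uid[8];		/* -> uid */
	char c_gid[8];		/* -> gid */
	char c_nlink[8];	/* -> nlink */
	char c_mtime[8];	/* -> mtime (32-bit, hence the "breaks in y2106" note) */
	char c_filesize[8];	/* -> body_len */
	char c_devmajor[8];	/* -> major */
	char c_devminor[8];	/* -> minor */
	char c_rdevmajor[8];	/* -> rdev, combined with c_rdevminor via MKDEV() */
	char c_rdevminor[8];
	char c_namesize[8];	/* -> name_len, including the trailing NUL */
	char c_check[8];	/* -> hdr_csum; all zeros in "070701" archives */
};

Each field is copied into a NUL-terminated 9-byte scratch buffer and decoded with simple_strtoul(buf, NULL, 16), which is why parse_header() advances s by 8 per field after skipping the 6-byte magic.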
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/init.h>
  3#include <linux/async.h>
  4#include <linux/fs.h>
  5#include <linux/slab.h>
  6#include <linux/types.h>
  7#include <linux/fcntl.h>
  8#include <linux/delay.h>
  9#include <linux/string.h>
 10#include <linux/dirent.h>
 11#include <linux/syscalls.h>
 12#include <linux/utime.h>
 13#include <linux/file.h>
 14#include <linux/memblock.h>
 15#include <linux/mm.h>
 16#include <linux/namei.h>
 17#include <linux/init_syscalls.h>
 18#include <linux/umh.h>
 19
 20static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
 21		loff_t *pos)
 22{
 23	ssize_t out = 0;
 24
 25	/* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
 26	while (count) {
 27		ssize_t rv = kernel_write(file, p, count, pos);
 28
 29		if (rv < 0) {
 30			if (rv == -EINTR || rv == -EAGAIN)
 31				continue;
 32			return out ? out : rv;
 33		} else if (rv == 0)
 34			break;
 35
 36		p += rv;
 37		out += rv;
 38		count -= rv;
 39	}
 40
 41	return out;
 42}
 43
 44static __initdata char *message;
 45static void __init error(char *x)
 46{
 47	if (!message)
 48		message = x;
 49}
 50
 51static void panic_show_mem(const char *fmt, ...)
 52{
 53	va_list args;
 54
 55	show_mem(0, NULL);
 56	va_start(args, fmt);
 57	panic(fmt, args);
 58	va_end(args);
 59}
 60
 61/* link hash */
 62
 63#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
 64
 65static __initdata struct hash {
 66	int ino, minor, major;
 67	umode_t mode;
 68	struct hash *next;
 69	char name[N_ALIGN(PATH_MAX)];
 70} *head[32];
 71
 72static inline int hash(int major, int minor, int ino)
 73{
 74	unsigned long tmp = ino + minor + (major << 3);
 75	tmp += tmp >> 5;
 76	return tmp & 31;
 77}
 78
 79static char __init *find_link(int major, int minor, int ino,
 80			      umode_t mode, char *name)
 81{
 82	struct hash **p, *q;
 83	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
 84		if ((*p)->ino != ino)
 85			continue;
 86		if ((*p)->minor != minor)
 87			continue;
 88		if ((*p)->major != major)
 89			continue;
 90		if (((*p)->mode ^ mode) & S_IFMT)
 91			continue;
 92		return (*p)->name;
 93	}
 94	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
 95	if (!q)
 96		panic_show_mem("can't allocate link hash entry");
 97	q->major = major;
 98	q->minor = minor;
 99	q->ino = ino;
100	q->mode = mode;
101	strcpy(q->name, name);
102	q->next = NULL;
103	*p = q;
104	return NULL;
105}
106
107static void __init free_hash(void)
108{
109	struct hash **p, *q;
110	for (p = head; p < head + 32; p++) {
111		while (*p) {
112			q = *p;
113			*p = q->next;
114			kfree(q);
115		}
116	}
117}
118
119static long __init do_utime(char *filename, time64_t mtime)
120{
121	struct timespec64 t[2];
122
123	t[0].tv_sec = mtime;
124	t[0].tv_nsec = 0;
125	t[1].tv_sec = mtime;
126	t[1].tv_nsec = 0;
127	return init_utimes(filename, t);
128}
129
130static __initdata LIST_HEAD(dir_list);
131struct dir_entry {
132	struct list_head list;
133	char *name;
134	time64_t mtime;
135};
136
137static void __init dir_add(const char *name, time64_t mtime)
138{
139	struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
140	if (!de)
141		panic_show_mem("can't allocate dir_entry buffer");
142	INIT_LIST_HEAD(&de->list);
143	de->name = kstrdup(name, GFP_KERNEL);
144	de->mtime = mtime;
145	list_add(&de->list, &dir_list);
146}
147
148static void __init dir_utime(void)
149{
150	struct dir_entry *de, *tmp;
151	list_for_each_entry_safe(de, tmp, &dir_list, list) {
152		list_del(&de->list);
153		do_utime(de->name, de->mtime);
154		kfree(de->name);
155		kfree(de);
156	}
157}
158
159static __initdata time64_t mtime;
160
161/* cpio header parsing */
162
163static __initdata unsigned long ino, major, minor, nlink;
164static __initdata umode_t mode;
165static __initdata unsigned long body_len, name_len;
166static __initdata uid_t uid;
167static __initdata gid_t gid;
168static __initdata unsigned rdev;
169
170static void __init parse_header(char *s)
171{
172	unsigned long parsed[12];
173	char buf[9];
174	int i;
175
176	buf[8] = '\0';
177	for (i = 0, s += 6; i < 12; i++, s += 8) {
178		memcpy(buf, s, 8);
179		parsed[i] = simple_strtoul(buf, NULL, 16);
180	}
181	ino = parsed[0];
182	mode = parsed[1];
183	uid = parsed[2];
184	gid = parsed[3];
185	nlink = parsed[4];
186	mtime = parsed[5]; /* breaks in y2106 */
187	body_len = parsed[6];
188	major = parsed[7];
189	minor = parsed[8];
190	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
191	name_len = parsed[11];
192}
193
194/* FSM */
195
196static __initdata enum state {
197	Start,
198	Collect,
199	GotHeader,
200	SkipIt,
201	GotName,
202	CopyFile,
203	GotSymlink,
204	Reset
205} state, next_state;
206
207static __initdata char *victim;
208static unsigned long byte_count __initdata;
209static __initdata loff_t this_header, next_header;
210
211static inline void __init eat(unsigned n)
212{
213	victim += n;
214	this_header += n;
215	byte_count -= n;
216}
217
218static __initdata char *collected;
219static long remains __initdata;
220static __initdata char *collect;
221
222static void __init read_into(char *buf, unsigned size, enum state next)
223{
224	if (byte_count >= size) {
225		collected = victim;
226		eat(size);
227		state = next;
228	} else {
229		collect = collected = buf;
230		remains = size;
231		next_state = next;
232		state = Collect;
233	}
234}
235
236static __initdata char *header_buf, *symlink_buf, *name_buf;
237
238static int __init do_start(void)
239{
240	read_into(header_buf, 110, GotHeader);
241	return 0;
242}
243
244static int __init do_collect(void)
245{
246	unsigned long n = remains;
247	if (byte_count < n)
248		n = byte_count;
249	memcpy(collect, victim, n);
250	eat(n);
251	collect += n;
252	if ((remains -= n) != 0)
253		return 1;
254	state = next_state;
255	return 0;
256}
257
258static int __init do_header(void)
259{
260	if (memcmp(collected, "070707", 6)==0) {
261		error("incorrect cpio method used: use -H newc option");
262		return 1;
263	}
264	if (memcmp(collected, "070701", 6)) {
265		error("no cpio magic");
266		return 1;
267	}
268	parse_header(collected);
269	next_header = this_header + N_ALIGN(name_len) + body_len;
270	next_header = (next_header + 3) & ~3;
271	state = SkipIt;
272	if (name_len <= 0 || name_len > PATH_MAX)
273		return 0;
274	if (S_ISLNK(mode)) {
275		if (body_len > PATH_MAX)
276			return 0;
277		collect = collected = symlink_buf;
278		remains = N_ALIGN(name_len) + body_len;
279		next_state = GotSymlink;
280		state = Collect;
281		return 0;
282	}
283	if (S_ISREG(mode) || !body_len)
284		read_into(name_buf, N_ALIGN(name_len), GotName);
285	return 0;
286}
287
288static int __init do_skip(void)
289{
290	if (this_header + byte_count < next_header) {
291		eat(byte_count);
292		return 1;
293	} else {
294		eat(next_header - this_header);
295		state = next_state;
296		return 0;
297	}
298}
299
300static int __init do_reset(void)
301{
302	while (byte_count && *victim == '\0')
303		eat(1);
304	if (byte_count && (this_header & 3))
305		error("broken padding");
306	return 1;
307}
308
309static void __init clean_path(char *path, umode_t fmode)
310{
311	struct kstat st;
312
313	if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
314	    (st.mode ^ fmode) & S_IFMT) {
315		if (S_ISDIR(st.mode))
316			init_rmdir(path);
317		else
318			init_unlink(path);
319	}
320}
321
322static int __init maybe_link(void)
323{
324	if (nlink >= 2) {
325		char *old = find_link(major, minor, ino, mode, collected);
326		if (old) {
327			clean_path(collected, 0);
328			return (init_link(old, collected) < 0) ? -1 : 1;
329		}
330	}
331	return 0;
332}
333
334static __initdata struct file *wfile;
335static __initdata loff_t wfile_pos;
336
337static int __init do_name(void)
338{
339	state = SkipIt;
340	next_state = Reset;
341	if (strcmp(collected, "TRAILER!!!") == 0) {
342		free_hash();
343		return 0;
344	}
345	clean_path(collected, mode);
346	if (S_ISREG(mode)) {
347		int ml = maybe_link();
348		if (ml >= 0) {
349			int openflags = O_WRONLY|O_CREAT;
350			if (ml != 1)
351				openflags |= O_TRUNC;
352			wfile = filp_open(collected, openflags, mode);
353			if (IS_ERR(wfile))
354				return 0;
355			wfile_pos = 0;
356
357			vfs_fchown(wfile, uid, gid);
358			vfs_fchmod(wfile, mode);
359			if (body_len)
360				vfs_truncate(&wfile->f_path, body_len);
361			state = CopyFile;
362		}
363	} else if (S_ISDIR(mode)) {
364		init_mkdir(collected, mode);
365		init_chown(collected, uid, gid, 0);
366		init_chmod(collected, mode);
367		dir_add(collected, mtime);
368	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
369		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
370		if (maybe_link() == 0) {
371			init_mknod(collected, mode, rdev);
372			init_chown(collected, uid, gid, 0);
373			init_chmod(collected, mode);
374			do_utime(collected, mtime);
375		}
376	}
377	return 0;
378}
379
380static int __init do_copy(void)
381{
382	if (byte_count >= body_len) {
383		struct timespec64 t[2] = { };
384		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
385			error("write error");
386
387		t[0].tv_sec = mtime;
388		t[1].tv_sec = mtime;
389		vfs_utimes(&wfile->f_path, t);
390
391		fput(wfile);
392		eat(body_len);
393		state = SkipIt;
394		return 0;
395	} else {
396		if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
397			error("write error");
398		body_len -= byte_count;
399		eat(byte_count);
400		return 1;
401	}
402}
403
404static int __init do_symlink(void)
405{
406	collected[N_ALIGN(name_len) + body_len] = '\0';
407	clean_path(collected, 0);
408	init_symlink(collected + N_ALIGN(name_len), collected);
409	init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
410	do_utime(collected, mtime);
411	state = SkipIt;
412	next_state = Reset;
413	return 0;
414}
415
416static __initdata int (*actions[])(void) = {
417	[Start]		= do_start,
418	[Collect]	= do_collect,
419	[GotHeader]	= do_header,
420	[SkipIt]	= do_skip,
421	[GotName]	= do_name,
422	[CopyFile]	= do_copy,
423	[GotSymlink]	= do_symlink,
424	[Reset]		= do_reset,
425};
426
427static long __init write_buffer(char *buf, unsigned long len)
428{
429	byte_count = len;
430	victim = buf;
431
432	while (!actions[state]())
433		;
434	return len - byte_count;
435}
436
437static long __init flush_buffer(void *bufv, unsigned long len)
438{
439	char *buf = (char *) bufv;
440	long written;
441	long origLen = len;
442	if (message)
443		return -1;
444	while ((written = write_buffer(buf, len)) < len && !message) {
445		char c = buf[written];
446		if (c == '0') {
447			buf += written;
448			len -= written;
449			state = Start;
450		} else if (c == 0) {
451			buf += written;
452			len -= written;
453			state = Reset;
454		} else
455			error("junk within compressed archive");
456	}
457	return origLen;
458}
459
460static unsigned long my_inptr; /* index of next byte to be processed in inbuf */
461
462#include <linux/decompress/generic.h>
463
464static char * __init unpack_to_rootfs(char *buf, unsigned long len)
465{
466	long written;
467	decompress_fn decompress;
468	const char *compress_name;
469	static __initdata char msg_buf[64];
470
471	header_buf = kmalloc(110, GFP_KERNEL);
472	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
473	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
474
475	if (!header_buf || !symlink_buf || !name_buf)
476		panic_show_mem("can't allocate buffers");
477
478	state = Start;
479	this_header = 0;
480	message = NULL;
481	while (!message && len) {
482		loff_t saved_offset = this_header;
483		if (*buf == '0' && !(this_header & 3)) {
484			state = Start;
485			written = write_buffer(buf, len);
486			buf += written;
487			len -= written;
488			continue;
489		}
490		if (!*buf) {
491			buf++;
492			len--;
493			this_header++;
494			continue;
495		}
496		this_header = 0;
497		decompress = decompress_method(buf, len, &compress_name);
498		pr_debug("Detected %s compressed data\n", compress_name);
499		if (decompress) {
500			int res = decompress(buf, len, NULL, flush_buffer, NULL,
501				   &my_inptr, error);
502			if (res)
503				error("decompressor failed");
504		} else if (compress_name) {
505			if (!message) {
506				snprintf(msg_buf, sizeof msg_buf,
507					 "compression method %s not configured",
508					 compress_name);
509				message = msg_buf;
510			}
511		} else
512			error("invalid magic at start of compressed archive");
513		if (state != Reset)
514			error("junk at the end of compressed archive");
515		this_header = saved_offset + my_inptr;
516		buf += my_inptr;
517		len -= my_inptr;
518	}
519	dir_utime();
520	kfree(name_buf);
521	kfree(symlink_buf);
522	kfree(header_buf);
523	return message;
524}
525
526static int __initdata do_retain_initrd;
527
528static int __init retain_initrd_param(char *str)
529{
530	if (*str)
531		return 0;
532	do_retain_initrd = 1;
533	return 1;
534}
535__setup("retain_initrd", retain_initrd_param);
536
537#ifdef CONFIG_ARCH_HAS_KEEPINITRD
538static int __init keepinitrd_setup(char *__unused)
539{
540	do_retain_initrd = 1;
541	return 1;
542}
543__setup("keepinitrd", keepinitrd_setup);
544#endif
545
546static bool __initdata initramfs_async = true;
547static int __init initramfs_async_setup(char *str)
548{
549	strtobool(str, &initramfs_async);
550	return 1;
551}
552__setup("initramfs_async=", initramfs_async_setup);
553
554extern char __initramfs_start[];
555extern unsigned long __initramfs_size;
556#include <linux/initrd.h>
557#include <linux/kexec.h>
558
559void __init reserve_initrd_mem(void)
560{
561	phys_addr_t start;
562	unsigned long size;
563
564	/* Ignore the virtual address computed during device tree parsing */
565	initrd_start = initrd_end = 0;
566
567	if (!phys_initrd_size)
568		return;
569	/*
570	 * Round the memory region to page boundaries as per free_initrd_mem()
571	 * This allows us to detect whether the pages overlapping the initrd
572	 * are in use, but more importantly, reserves the entire set of pages
573	 * as we don't want these pages allocated for other purposes.
574	 */
575	start = round_down(phys_initrd_start, PAGE_SIZE);
576	size = phys_initrd_size + (phys_initrd_start - start);
577	size = round_up(size, PAGE_SIZE);
578
579	if (!memblock_is_region_memory(start, size)) {
580		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
581		       (u64)start, size);
582		goto disable;
583	}
584
585	if (memblock_is_region_reserved(start, size)) {
586		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
587		       (u64)start, size);
588		goto disable;
589	}
590
591	memblock_reserve(start, size);
592	/* Now convert initrd to virtual addresses */
593	initrd_start = (unsigned long)__va(phys_initrd_start);
594	initrd_end = initrd_start + phys_initrd_size;
595	initrd_below_start_ok = 1;
596
597	return;
598disable:
599	pr_cont(" - disabling initrd\n");
600	initrd_start = 0;
601	initrd_end = 0;
602}
603
604void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
605{
606#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
607	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
608	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
609
610	memblock_free(__pa(aligned_start), aligned_end - aligned_start);
611#endif
612
613	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
614			"initrd");
615}
616
617#ifdef CONFIG_KEXEC_CORE
618static bool __init kexec_free_initrd(void)
619{
620	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
621	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
622
623	/*
624	 * If the initrd region is overlapped with crashkernel reserved region,
625	 * free only memory that is not part of crashkernel region.
626	 */
627	if (initrd_start >= crashk_end || initrd_end <= crashk_start)
628		return false;
629
630	/*
631	 * Initialize initrd memory region since the kexec boot does not do so.
632	 */
633	memset((void *)initrd_start, 0, initrd_end - initrd_start);
634	if (initrd_start < crashk_start)
635		free_initrd_mem(initrd_start, crashk_start);
636	if (initrd_end > crashk_end)
637		free_initrd_mem(crashk_end, initrd_end);
638	return true;
639}
640#else
641static inline bool kexec_free_initrd(void)
642{
643	return false;
644}
645#endif /* CONFIG_KEXEC_CORE */
646
647#ifdef CONFIG_BLK_DEV_RAM
648static void __init populate_initrd_image(char *err)
649{
650	ssize_t written;
651	struct file *file;
652	loff_t pos = 0;
653
654	unpack_to_rootfs(__initramfs_start, __initramfs_size);
655
656	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
657			err);
658	file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
659	if (IS_ERR(file))
660		return;
661
662	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
663			&pos);
664	if (written != initrd_end - initrd_start)
665		pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
666		       written, initrd_end - initrd_start);
667	fput(file);
668}
669#endif /* CONFIG_BLK_DEV_RAM */
670
671static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
672{
673	/* Load the built in initramfs */
674	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
675	if (err)
676		panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */
677
678	if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
679		goto done;
680
681	if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
682		printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
683	else
684		printk(KERN_INFO "Unpacking initramfs...\n");
685
686	err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
687	if (err) {
688#ifdef CONFIG_BLK_DEV_RAM
689		populate_initrd_image(err);
690#else
691		printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
692#endif
693	}
694
695done:
696	/*
697	 * If the initrd region is overlapped with crashkernel reserved region,
698	 * free only memory that is not part of crashkernel region.
699	 */
700	if (!do_retain_initrd && initrd_start && !kexec_free_initrd())
701		free_initrd_mem(initrd_start, initrd_end);
702	initrd_start = 0;
703	initrd_end = 0;
704
705	flush_delayed_fput();
706}
707
708static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
709static async_cookie_t initramfs_cookie;
710
711void wait_for_initramfs(void)
712{
713	if (!initramfs_cookie) {
714		/*
715		 * Something before rootfs_initcall wants to access
716		 * the filesystem/initramfs. Probably a bug. Make a
717		 * note, avoid deadlocking the machine, and let the
718		 * caller's access fail as it used to.
719		 */
720		pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n");
721		return;
722	}
723	async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain);
724}
725EXPORT_SYMBOL_GPL(wait_for_initramfs);
726
727static int __init populate_rootfs(void)
728{
729	initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
730						 &initramfs_domain);
731	usermodehelper_enable();
732	if (!initramfs_async)
733		wait_for_initramfs();
734	return 0;
735}
736rootfs_initcall(populate_rootfs);
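In the newer (v6.13.7) listing, archives with the "070702" magic carry a per-file data checksum: xwrite() accumulates every byte it writes into io_csum, and do_copy() compares the result against the hdr_csum value parsed from the header. The check is a plain 32-bit sum of the data bytes, nothing stronger. Below is a minimal userspace sketch of the same accumulation; it is an illustration under the assumption that the archive follows the conventional newc/crc checksum semantics, and the function name is made up here.

#include <stdint.h>
#include <stddef.h>

/*
 * Sum the data bytes of one archive member into a 32-bit accumulator,
 * mirroring io_csum += p[i] in xwrite() so the result can be compared
 * with the header's c_check field (hdr_csum in the kernel listing).
 */
static uint32_t newc_data_checksum(const unsigned char *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];
	return sum;
}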