// SPDX-License-Identifier: GPL-2.0
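/*
 * Unpack the built-in initramfs and any bootloader-supplied initrd
 * (possibly compressed "newc" cpio archives) into the rootfs during
 * early boot.
 */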
#include <linux/init.h>
#include <linux/async.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/dirent.h>
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
#include <linux/umh.h>
#include <linux/security.h>

#include "do_mounts.h"

static __initdata bool csum_present;
static __initdata u32 io_csum;

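/*
 * Write the whole buffer to @file, retrying short, interrupted and
 * would-block writes.  For "070702" archives the per-file checksum is
 * accumulated in io_csum as the data is written out.
 */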
static ssize_t __init xwrite(struct file *file, const unsigned char *p,
		size_t count, loff_t *pos)
{
	ssize_t out = 0;

	/* sys_write can write at most MAX_RW_COUNT (i.e. 2G-4K) bytes per call */
	while (count) {
		ssize_t rv = kernel_write(file, p, count, pos);

		if (rv < 0) {
			if (rv == -EINTR || rv == -EAGAIN)
				continue;
			return out ? out : rv;
		} else if (rv == 0)
			break;

		if (csum_present) {
			ssize_t i;

			for (i = 0; i < rv; i++)
				io_csum += p[i];
		}

		p += rv;
		out += rv;
		count -= rv;
	}

	return out;
}

static __initdata char *message;
static void __init error(char *x)
{
	if (!message)
		message = x;
}

#define panic_show_mem(fmt, ...) \
	({ show_mem(); panic(fmt, ##__VA_ARGS__); })

/* link hash */

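/*
 * On-disk size of the name field: c_namesize (which counts the trailing
 * NUL) padded so that the 110-byte header plus name ends on a 4-byte
 * boundary, i.e. the smallest value >= len that is congruent to 2 mod 4.
 */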
#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)

static __initdata struct hash {
	int ino, minor, major;
	umode_t mode;
	struct hash *next;
	char name[N_ALIGN(PATH_MAX)];
} *head[32];

static inline int hash(int major, int minor, int ino)
{
	unsigned long tmp = ino + minor + (major << 3);
	tmp += tmp >> 5;
	return tmp & 31;
}

static char __init *find_link(int major, int minor, int ino,
			      umode_t mode, char *name)
{
	struct hash **p, *q;
	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
		if ((*p)->ino != ino)
			continue;
		if ((*p)->minor != minor)
			continue;
		if ((*p)->major != major)
			continue;
		if (((*p)->mode ^ mode) & S_IFMT)
			continue;
		return (*p)->name;
	}
	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
	if (!q)
		panic_show_mem("can't allocate link hash entry");
	q->major = major;
	q->minor = minor;
	q->ino = ino;
	q->mode = mode;
	strcpy(q->name, name);
	q->next = NULL;
	*p = q;
	return NULL;
}

static void __init free_hash(void)
{
	struct hash **p, *q;
	for (p = head; p < head + 32; p++) {
		while (*p) {
			q = *p;
			*p = q->next;
			kfree(q);
		}
	}
}

#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
static void __init do_utime(char *filename, time64_t mtime)
{
	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
	init_utimes(filename, t);
}

static void __init do_utime_path(const struct path *path, time64_t mtime)
{
	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
	vfs_utimes(path, t);
}

static __initdata LIST_HEAD(dir_list);
struct dir_entry {
	struct list_head list;
	time64_t mtime;
	char name[];
};

static void __init dir_add(const char *name, time64_t mtime)
{
	size_t nlen = strlen(name) + 1;
	struct dir_entry *de;

	de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
	if (!de)
		panic_show_mem("can't allocate dir_entry buffer");
	INIT_LIST_HEAD(&de->list);
	strscpy(de->name, name, nlen);
	de->mtime = mtime;
	list_add(&de->list, &dir_list);
}

static void __init dir_utime(void)
{
	struct dir_entry *de, *tmp;
	list_for_each_entry_safe(de, tmp, &dir_list, list) {
		list_del(&de->list);
		do_utime(de->name, de->mtime);
		kfree(de);
	}
}
#else
static void __init do_utime(char *filename, time64_t mtime) {}
static void __init do_utime_path(const struct path *path, time64_t mtime) {}
static void __init dir_add(const char *name, time64_t mtime) {}
static void __init dir_utime(void) {}
#endif

static __initdata time64_t mtime;

/* cpio header parsing */

static __initdata unsigned long ino, major, minor, nlink;
static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;
static __initdata u32 hdr_csum;

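/*
 * A newc header is 110 bytes: a 6-byte magic ("070701" or "070702")
 * followed by thirteen 8-character ASCII hex fields (inode, mode, uid,
 * gid, nlink, mtime, filesize, major, minor, rmajor, rminor, namesize,
 * checksum).  parse_header() decodes them into the variables above.
 */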
static void __init parse_header(char *s)
{
	unsigned long parsed[13];
	char buf[9];
	int i;

	buf[8] = '\0';
	for (i = 0, s += 6; i < 13; i++, s += 8) {
		memcpy(buf, s, 8);
		parsed[i] = simple_strtoul(buf, NULL, 16);
	}
	ino = parsed[0];
	mode = parsed[1];
	uid = parsed[2];
	gid = parsed[3];
	nlink = parsed[4];
	mtime = parsed[5]; /* breaks in y2106 */
	body_len = parsed[6];
	major = parsed[7];
	minor = parsed[8];
	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
	name_len = parsed[11];
	hdr_csum = parsed[12];
}

/* FSM */
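/*
 * The unpacker is a small state machine driven by write_buffer(): each
 * do_*() handler consumes bytes from the current input chunk and either
 * advances to the next state (returning 0) or hands control back because
 * the chunk is exhausted or an error was recorded (returning 1).
 */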
214
215static __initdata enum state {
216 Start,
217 Collect,
218 GotHeader,
219 SkipIt,
220 GotName,
221 CopyFile,
222 GotSymlink,
223 Reset
224} state, next_state;
225
226static __initdata char *victim;
227static unsigned long byte_count __initdata;
228static __initdata loff_t this_header, next_header;
229
230static inline void __init eat(unsigned n)
231{
232 victim += n;
233 this_header += n;
234 byte_count -= n;
235}
236
237static __initdata char *collected;
238static long remains __initdata;
239static __initdata char *collect;
240
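/*
 * read_into() grabs @size bytes for the next state.  If the current chunk
 * already holds them, "collected" points straight into it; otherwise the
 * Collect state copies the bytes piecemeal into @buf across chunks.
 */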
static void __init read_into(char *buf, unsigned size, enum state next)
{
	if (byte_count >= size) {
		collected = victim;
		eat(size);
		state = next;
	} else {
		collect = collected = buf;
		remains = size;
		next_state = next;
		state = Collect;
	}
}

static __initdata char *header_buf, *symlink_buf, *name_buf;

static int __init do_start(void)
{
	read_into(header_buf, 110, GotHeader);
	return 0;
}

static int __init do_collect(void)
{
	unsigned long n = remains;
	if (byte_count < n)
		n = byte_count;
	memcpy(collect, victim, n);
	eat(n);
	collect += n;
	if ((remains -= n) != 0)
		return 1;
	state = next_state;
	return 0;
}

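/*
 * Validate the magic, decode the header and pick the next state:
 * symlinks collect name and target in one go, regular files (and entries
 * without a body) move on to GotName, everything else is skipped up to
 * the next header.
 */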
static int __init do_header(void)
{
	if (!memcmp(collected, "070701", 6)) {
		csum_present = false;
	} else if (!memcmp(collected, "070702", 6)) {
		csum_present = true;
	} else {
		if (memcmp(collected, "070707", 6) == 0)
			error("incorrect cpio method used: use -H newc option");
		else
			error("no cpio magic");
		return 1;
	}
	parse_header(collected);
	next_header = this_header + N_ALIGN(name_len) + body_len;
	next_header = (next_header + 3) & ~3;
	state = SkipIt;
	if (name_len <= 0 || name_len > PATH_MAX)
		return 0;
	if (S_ISLNK(mode)) {
		if (body_len > PATH_MAX)
			return 0;
		collect = collected = symlink_buf;
		remains = N_ALIGN(name_len) + body_len;
		next_state = GotSymlink;
		state = Collect;
		return 0;
	}
	if (S_ISREG(mode) || !body_len)
		read_into(name_buf, N_ALIGN(name_len), GotName);
	return 0;
}

static int __init do_skip(void)
{
	if (this_header + byte_count < next_header) {
		eat(byte_count);
		return 1;
	} else {
		eat(next_header - this_header);
		state = next_state;
		return 0;
	}
}

static int __init do_reset(void)
{
	while (byte_count && *victim == '\0')
		eat(1);
	if (byte_count && (this_header & 3))
		error("broken padding");
	return 1;
}

static void __init clean_path(char *path, umode_t fmode)
{
	struct kstat st;

	if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
	    (st.mode ^ fmode) & S_IFMT) {
		if (S_ISDIR(st.mode))
			init_rmdir(path);
		else
			init_unlink(path);
	}
}

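/*
 * Entries with nlink >= 2 may be hard links: the first occurrence of a
 * (major, minor, ino, type) tuple is remembered in the link hash, and
 * later occurrences are recreated with init_link().  Returns 1 if a link
 * was made, 0 if the caller should create the object, -1 on failure.
 */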
static int __init maybe_link(void)
{
	if (nlink >= 2) {
		char *old = find_link(major, minor, ino, mode, collected);
		if (old) {
			clean_path(collected, 0);
			return (init_link(old, collected) < 0) ? -1 : 1;
		}
	}
	return 0;
}

static __initdata struct file *wfile;
static __initdata loff_t wfile_pos;

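/*
 * Create the object named by the header: open (or hard-link) regular
 * files for the CopyFile state, and create directories, device nodes,
 * FIFOs and sockets directly.  "TRAILER!!!" marks the end of an archive
 * and just flushes the link hash.
 */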
static int __init do_name(void)
{
	state = SkipIt;
	next_state = Reset;

	/* name_len > 0 && name_len <= PATH_MAX checked in do_header */
	if (collected[name_len - 1] != '\0') {
		pr_err("initramfs name without nulterm: %.*s\n",
		       (int)name_len, collected);
		error("malformed archive");
		return 1;
	}

	if (strcmp(collected, "TRAILER!!!") == 0) {
		free_hash();
		return 0;
	}
	clean_path(collected, mode);
	if (S_ISREG(mode)) {
		int ml = maybe_link();
		if (ml >= 0) {
			int openflags = O_WRONLY|O_CREAT|O_LARGEFILE;
			if (ml != 1)
				openflags |= O_TRUNC;
			wfile = filp_open(collected, openflags, mode);
			if (IS_ERR(wfile))
				return 0;
			wfile_pos = 0;
			io_csum = 0;

			vfs_fchown(wfile, uid, gid);
			vfs_fchmod(wfile, mode);
			if (body_len)
				vfs_truncate(&wfile->f_path, body_len);
			state = CopyFile;
		}
	} else if (S_ISDIR(mode)) {
		init_mkdir(collected, mode);
		init_chown(collected, uid, gid, 0);
		init_chmod(collected, mode);
		dir_add(collected, mtime);
	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		if (maybe_link() == 0) {
			init_mknod(collected, mode, rdev);
			init_chown(collected, uid, gid, 0);
			init_chmod(collected, mode);
			do_utime(collected, mtime);
		}
	}
	return 0;
}

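/*
 * Stream the file body into the file opened by do_name().  Once the whole
 * body has been written, the mtime is restored, the checksum (if any) is
 * verified and the remaining padding is skipped.
 */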
static int __init do_copy(void)
{
	if (byte_count >= body_len) {
		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
			error("write error");

		do_utime_path(&wfile->f_path, mtime);
		fput(wfile);
		if (csum_present && io_csum != hdr_csum)
			error("bad data checksum");
		eat(body_len);
		state = SkipIt;
		return 0;
	} else {
		if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
			error("write error");
		body_len -= byte_count;
		eat(byte_count);
		return 1;
	}
}

static int __init do_symlink(void)
{
	if (collected[name_len - 1] != '\0') {
		pr_err("initramfs symlink without nulterm: %.*s\n",
		       (int)name_len, collected);
		error("malformed archive");
		return 1;
	}
	collected[N_ALIGN(name_len) + body_len] = '\0';
	clean_path(collected, 0);
	init_symlink(collected + N_ALIGN(name_len), collected);
	init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
	do_utime(collected, mtime);
	state = SkipIt;
	next_state = Reset;
	return 0;
}

static __initdata int (*actions[])(void) = {
	[Start]		= do_start,
	[Collect]	= do_collect,
	[GotHeader]	= do_header,
	[SkipIt]	= do_skip,
	[GotName]	= do_name,
	[CopyFile]	= do_copy,
	[GotSymlink]	= do_symlink,
	[Reset]		= do_reset,
};

static long __init write_buffer(char *buf, unsigned long len)
{
	byte_count = len;
	victim = buf;

	while (!actions[state]())
		;
	return len - byte_count;
}

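/*
 * flush_buffer() is the decompressor's output callback: it feeds each
 * decompressed chunk through the state machine and restarts at the next
 * embedded archive (a '0' begins another "0707xx" header, a NUL byte is
 * inter-archive padding).
 */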
static long __init flush_buffer(void *bufv, unsigned long len)
{
	char *buf = bufv;
	long written;
	long origLen = len;
	if (message)
		return -1;
	while ((written = write_buffer(buf, len)) < len && !message) {
		char c = buf[written];
		if (c == '0') {
			buf += written;
			len -= written;
			state = Start;
		} else if (c == 0) {
			buf += written;
			len -= written;
			state = Reset;
		} else
			error("junk within compressed archive");
	}
	return origLen;
}

static unsigned long my_inptr __initdata; /* index of next byte to be processed in inbuf */

#include <linux/decompress/generic.h>

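/*
 * Walk the buffer, which may contain several concatenated cpio archives,
 * uncompressed and/or compressed with any of the configured decompressors.
 * Returns NULL on success or a static error message.
 */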
static char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
	long written;
	decompress_fn decompress;
	const char *compress_name;
	static __initdata char msg_buf[64];

	header_buf = kmalloc(110, GFP_KERNEL);
	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);

	if (!header_buf || !symlink_buf || !name_buf)
		panic_show_mem("can't allocate buffers");

	state = Start;
	this_header = 0;
	message = NULL;
	while (!message && len) {
		loff_t saved_offset = this_header;
		if (*buf == '0' && !(this_header & 3)) {
			state = Start;
			written = write_buffer(buf, len);
			buf += written;
			len -= written;
			continue;
		}
		if (!*buf) {
			buf++;
			len--;
			this_header++;
			continue;
		}
		this_header = 0;
		decompress = decompress_method(buf, len, &compress_name);
		pr_debug("Detected %s compressed data\n", compress_name);
		if (decompress) {
			int res = decompress(buf, len, NULL, flush_buffer, NULL,
					     &my_inptr, error);
			if (res)
				error("decompressor failed");
		} else if (compress_name) {
			if (!message) {
				snprintf(msg_buf, sizeof msg_buf,
					 "compression method %s not configured",
					 compress_name);
				message = msg_buf;
			}
		} else
			error("invalid magic at start of compressed archive");
		if (state != Reset)
			error("junk at the end of compressed archive");
		this_header = saved_offset + my_inptr;
		buf += my_inptr;
		len -= my_inptr;
	}
	dir_utime();
	kfree(name_buf);
	kfree(symlink_buf);
	kfree(header_buf);
	return message;
}

static int __initdata do_retain_initrd;

static int __init retain_initrd_param(char *str)
{
	if (*str)
		return 0;
	do_retain_initrd = 1;
	return 1;
}
__setup("retain_initrd", retain_initrd_param);

#ifdef CONFIG_ARCH_HAS_KEEPINITRD
static int __init keepinitrd_setup(char *__unused)
{
	do_retain_initrd = 1;
	return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif

static bool __initdata initramfs_async = true;
static int __init initramfs_async_setup(char *str)
{
	return kstrtobool(str, &initramfs_async) == 0;
}
__setup("initramfs_async=", initramfs_async_setup);

extern char __initramfs_start[];
extern unsigned long __initramfs_size;
#include <linux/initrd.h>
#include <linux/kexec.h>

static BIN_ATTR(initrd, 0440, sysfs_bin_attr_simple_read, NULL, 0);

void __init reserve_initrd_mem(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);

	memblock_free((void *)aligned_start, aligned_end - aligned_start);
#endif

	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			"initrd");
}

#ifdef CONFIG_CRASH_RESERVE
static bool __init kexec_free_initrd(void)
{
	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
	unsigned long crashk_end = (unsigned long)__va(crashk_res.end);

	/*
	 * If the initrd region overlaps the crashkernel reserved region,
	 * free only the memory that is not part of the crashkernel region.
	 */
	if (initrd_start >= crashk_end || initrd_end <= crashk_start)
		return false;

	/*
	 * Initialize the initrd memory region, since a kexec boot does not
	 * do so.
	 */
	memset((void *)initrd_start, 0, initrd_end - initrd_start);
	if (initrd_start < crashk_start)
		free_initrd_mem(initrd_start, crashk_start);
	if (initrd_end > crashk_end)
		free_initrd_mem(crashk_end, initrd_end);
	return true;
}
#else
static inline bool kexec_free_initrd(void)
{
	return false;
}
#endif /* CONFIG_CRASH_RESERVE */

#ifdef CONFIG_BLK_DEV_RAM
static void __init populate_initrd_image(char *err)
{
	ssize_t written;
	struct file *file;
	loff_t pos = 0;

	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
			err);
	file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
	if (IS_ERR(file))
		return;

	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
			&pos);
	if (written != initrd_end - initrd_start)
		pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
		       written, initrd_end - initrd_start);
	fput(file);
}
#endif /* CONFIG_BLK_DEV_RAM */

static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
{
	/* Load the built-in initramfs */
	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
	if (err)
		panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */

	if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
		goto done;

	if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
		printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
	else
		printk(KERN_INFO "Unpacking initramfs...\n");

	err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
	if (err) {
#ifdef CONFIG_BLK_DEV_RAM
		populate_initrd_image(err);
#else
		printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
#endif
	}

done:
	security_initramfs_populated();

	/*
	 * If the initrd region overlaps the crashkernel reserved region,
	 * free only the memory that is not part of the crashkernel region.
	 */
	if (!do_retain_initrd && initrd_start && !kexec_free_initrd()) {
		free_initrd_mem(initrd_start, initrd_end);
	} else if (do_retain_initrd && initrd_start) {
		bin_attr_initrd.size = initrd_end - initrd_start;
		bin_attr_initrd.private = (void *)initrd_start;
		if (sysfs_create_bin_file(firmware_kobj, &bin_attr_initrd))
			pr_err("Failed to create initrd sysfs file");
	}
	initrd_start = 0;
	initrd_end = 0;

	init_flush_fput();
}

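/*
 * The rootfs is populated asynchronously so the rest of the boot can
 * proceed in parallel; wait_for_initramfs() (and the initramfs_async=
 * parameter) let callers synchronize with it before touching the
 * filesystem.
 */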
static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
static async_cookie_t initramfs_cookie;

void wait_for_initramfs(void)
{
	if (!initramfs_cookie) {
		/*
		 * Something before rootfs_initcall wants to access
		 * the filesystem/initramfs. Probably a bug. Make a
		 * note, avoid deadlocking the machine, and let the
		 * caller's access fail as it used to.
		 */
		pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n");
		return;
	}
	async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain);
}
EXPORT_SYMBOL_GPL(wait_for_initramfs);

static int __init populate_rootfs(void)
{
	initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
						 &initramfs_domain);
	usermodehelper_enable();
	if (!initramfs_async)
		wait_for_initramfs();
	return 0;
}
rootfs_initcall(populate_rootfs);