// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/vmcore_info.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

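/*
 * Worked example (illustrative): with the default macros above, a
 * direct-map virtual address lands in the dump at a fixed linear offset
 * past the header pages (data_offset, computed in get_kcore_size()
 * below). Assuming a PAGE_OFFSET of 0xffff888000000000 (x86-64 without
 * KASLR):
 *
 *	unsigned long vaddr = 0xffff888000100000UL;
 *	unsigned long off = kc_vaddr_to_offset(vaddr);	// 0x100000
 *	BUG_ON(kc_offset_to_vaddr(off) != vaddr);	// round-trips
 */
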
#ifndef kc_xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
	return __va(phys);
}
#endif
#ifndef kc_unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
}
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * Same as oldmem_pfn_is_ram in vmcore.
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

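/*
 * Usage sketch (illustrative, not part of this file): a memory-ballooning
 * driver can veto reads of pages it knows are not backed by RAM. Both
 * balloon_pfn_is_ram() and balloon_page_is_inflated() are hypothetical
 * names standing in for a real backend query.
 *
 *	static int balloon_pfn_is_ram(unsigned long pfn)
 *	{
 *		return !balloon_page_is_inflated(pfn);
 *	}
 *
 *	static int __init balloon_kcore_init(void)
 *	{
 *		return register_mem_pfn_is_ram(&balloon_pfn_is_ram);
 *	}
 *	fs_initcall(balloon_kcore_init);
 */
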
/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

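/*
 * Usage sketch (illustrative): callers keep the kcore_list storage alive
 * themselves, typically in a static, and register it once during boot,
 * just as proc_kcore_init() does below for the vmalloc area:
 *
 *	static struct kcore_list kcore_example;
 *
 *	kclist_add(&kcore_example, (void *)VMALLOC_START,
 *		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
 */
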
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

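/*
 * Resulting file layout (illustrative, 64-bit ELF, three registered
 * entries and hence nphdr == 4):
 *
 *	offset 0	struct elfhdr		(64 bytes)
 *	offset 64	4 * struct elf_phdr	(4 * 56 = 224 bytes)
 *	offset 288	PT_NOTE payload		(notes_len bytes)
 *	data_offset	PAGE_ALIGN(288 + notes_len): start of memory image
 *
 * Each PT_LOAD then gets p_offset = kc_vaddr_to_offset(addr) + data_offset,
 * so the file size is data_offset plus the highest mapped offset.
 */
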
#ifdef CONFIG_HIGHMEM
/*
 * With highmem, we can assume [0...max_low_pfn) is a continuous range of
 * memory, because its holes are not as big as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Calculate the vmemmap range backing the given system RAM pfns and register it. */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we page-align start and end above) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid((void *)ent->addr))
		goto free_out;

	/* Cut off any area that is not mapped (originally from ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid(), so we know ent->addr is a
	 * valid pointer and can compare against it to decide whether we need
	 * to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized yet; update now: find the highest pfn. */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan [0, end_pfn) */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

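/*
 * Resulting layout for a single "CORE"/NT_PRSTATUS note (illustrative):
 *
 *	+0	n_namesz = 5	\
 *	+4	n_descsz	 > struct elf_note (12 bytes)
 *	+8	n_type		/
 *	+12	"CORE\0" plus 3 bytes of padding (name, 4-byte aligned)
 *	+20	prstatus bytes, then padding to the next 4-byte boundary
 *
 * get_kcore_size() sizes notes_len with the same ALIGN(..., 4) rules, so
 * these appends cannot overrun the buffer.
 */
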
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	char *buf = file->private_data;
	loff_t *fpos = &iocb->ki_pos;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t buflen = iov_iter_count(iter);
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
			ret = -EFAULT;
			goto out;
		}

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
				 iter) != tsz) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strscpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;
		phys_addr_t phys;
		void *__start;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *pos;

			m = NULL;
			list_for_each_entry(pos, &kclist_head, list) {
				if (start >= pos->addr &&
				    start < pos->addr + pos->size) {
					m = pos;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
		{
			const char *src = (char *)start;
			size_t read = 0, left = tsz;

			/*
			 * vmalloc uses spinlocks, so we optimistically try to
			 * read memory. If this fails, fault pages in and try
			 * again until we are done.
			 */
			while (true) {
				read += vread_iter(iter, src, left);
				if (read == tsz)
					break;

				src += read;
				left -= read;

				if (fault_in_iov_iter_writeable(iter, left)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		}
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			phys = __pa(start);
			pfn = phys >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
			    pfn_is_unaccepted_memory(pfn)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (m->type == KCORE_RAM) {
				__start = kc_xlate_dev_mem_ptr(phys);
				if (!__start) {
					ret = -ENOMEM;
					if (iov_iter_zero(tsz, iter) != tsz)
						ret = -EFAULT;
					goto out;
				}
			} else {
				__start = (void *)start;
			}

			/*
			 * Sadly we must use a bounce buffer here to be able to
			 * make use of copy_from_kernel_nofault(), as these
			 * memory regions might not always be mapped on all
			 * architectures.
			 */
			ret = copy_from_kernel_nofault(buf, __start, tsz);
			if (m->type == KCORE_RAM)
				kc_unxlate_dev_mem_ptr(phys, __start);
			if (ret) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				ret = 0;
			/*
			 * We know the bounce buffer is safe to copy from, so
			 * use _copy_to_iter() directly.
			 */
			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

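/*
 * Userspace sketch (illustrative): given the loop above, a reader can walk
 * the program headers and pread() any exposed kernel virtual address.
 * Error handling is omitted for brevity.
 *
 *	int fd = open("/proc/kcore", O_RDONLY);	// needs CAP_SYS_RAWIO
 *	Elf64_Ehdr ehdr;
 *	pread(fd, &ehdr, sizeof(ehdr), 0);
 *	Elf64_Phdr phdrs[ehdr.e_phnum];
 *	pread(fd, phdrs, sizeof(phdrs), ehdr.e_phoff);
 *
 *	// To read kernel vaddr V covered by phdrs[i]:
 *	// pread(fd, buf, len, phdrs[i].p_offset + (V - phdrs[i].p_vaddr));
 */
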
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read_iter	= read_kcore_iter,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping kernel text instead of
 * the direct-map area, so we need to create a dedicated TEXT entry for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * Where the module area is distinct, it has no intersection with the
 * vmalloc area, so register it as a separate entry.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);
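
/*
 * Usage note (illustrative): once this initcall has run, the live kernel
 * image can be inspected with standard ELF tools, e.g.:
 *
 *	# gdb /path/to/vmlinux /proc/kcore
 *	(gdb) p jiffies
 *
 * The vmlinux must match the running kernel, and the reader needs
 * CAP_SYS_RAWIO (see open_kcore() above).
 */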