/*
 * fs/proc/kcore.c kernel ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * ELF version written by David Howells <David.Howells@nexor.co.uk>
 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 * Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
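/*
 * The resulting image is an ELF core file describing live kernel memory;
 * it is typically inspected with something like "gdb vmlinux /proc/kcore".
 */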

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;

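/*
 * By default a kernel virtual address maps to a file offset by simply
 * subtracting PAGE_OFFSET (the start of the direct map); architectures
 * with a different layout override these hooks.
 */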
#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        write_lock(&kclist_lock);
        list_add_tail(&new->list, &kclist_head);
        write_unlock(&kclist_lock);
}

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }
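        /*
         * Reserve room for the ELF header, the program headers (plus some
         * slack), and the three notes (prstatus, prpsinfo, task_struct),
         * each with a 4-byte-aligned name and payload.
         */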
        *elf_buflen = sizeof(struct elfhdr) +
                        (*nphdr + 2)*sizeof(struct elf_phdr) +
                        3 * ((sizeof(struct elf_note)) +
                             roundup(sizeof(CORE_STR), 4)) +
                        roundup(sizeof(struct elf_prstatus), 4) +
                        roundup(sizeof(struct elf_prpsinfo), 4) +
                        roundup(sizeof(struct task_struct), 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
}

static void free_kclist_ents(struct list_head *head)
{
        struct kcore_list *tmp, *pos;

        list_for_each_entry_safe(pos, tmp, head, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}

/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with the passed-in list.
 */
static void __kcore_update_ram(struct list_head *list)
{
        int nphdr;
        size_t size;
        struct kcore_list *tmp, *pos;
        LIST_HEAD(garbage);

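        /*
         * Swap the stale KCORE_RAM/KCORE_VMEMMAP entries for the new list
         * under the lock; the replaced entries are freed afterwards so that
         * no kfree() happens with the lock held.
         */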
        write_lock(&kclist_lock);
        if (kcore_need_update) {
                list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                        if (pos->type == KCORE_RAM
                                || pos->type == KCORE_VMEMMAP)
                                list_move(&pos->list, &garbage);
                }
                list_splice_tail(list, &kclist_head);
        } else
                list_splice(list, &garbage);
        kcore_need_update = 0;
        proc_root_kcore->size = get_kcore_size(&nphdr, &size);
        write_unlock(&kclist_lock);

        free_kclist_ents(&garbage);
}

#ifdef CONFIG_HIGHMEM
/*
 * With highmem, we can describe the direct-mapped range [0...max_low_pfn) as
 * a single chunk, because any holes in low memory are not as big as in the
 * !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_update_ram(void)
{
        LIST_HEAD(head);
        struct kcore_list *ent;
        int ret = 0;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, &head);
        __kcore_update_ram(&head);
        return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = ALIGN(end, PAGE_SIZE);
        /* overlap check (because we have to align pages) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
        ent->size = nr_pages << PAGE_SHIFT;

        /* Sanity check: can happen on 32-bit arches... maybe */
        if (ent->addr < (unsigned long) __va(0))
                goto free_out;

        /* Cut off the unmapped area (taken from the ppc32 code). */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /* Trim when the vmalloc() area sits above the direct-map area. */
        if (VMALLOC_START > (unsigned long)__va(0)) {
                if (ent->addr > VMALLOC_START)
                        goto free_out;
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_update_ram(void)
{
        int nid, ret;
        unsigned long end_pfn;
        LIST_HEAD(head);

        /* Not initialized... update now */
        /* find out the "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_HIGH_MEMORY) {
                unsigned long node_end;
                node_end = NODE_DATA(nid)->node_start_pfn +
                        NODE_DATA(nid)->node_spanned_pages;
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
        if (ret) {
                free_kclist_ents(&head);
                return -ENOMEM;
        }
        __kcore_update_ram(&head);
        return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup((strlen(en->name) + 1), 4);
        sz += roundup(en->datasz, 4);

        return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
        struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);

        /* XXX - cast from long long to long to avoid need for libgcc.a */
        bufp = (char*) roundup((unsigned long)bufp,4);
        DUMP_WRITE(men->data, men->datasz);
        bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

        return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        struct elf_prpsinfo prpsinfo;   /* NT_PRPSINFO */
        struct elf_phdr *nhdr, *phdr;
        struct elfhdr *elf;
        struct memelfnote notes[3];
        off_t offset = 0;
        struct kcore_list *m;

        /* setup ELF header */
        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        offset += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS]  = ELF_CLASS;
        elf->e_ident[EI_DATA]   = ELF_DATA;
        elf->e_ident[EI_VERSION]= EV_CURRENT;
        elf->e_ident[EI_OSABI]  = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type     = ET_CORE;
        elf->e_machine  = ELF_ARCH;
        elf->e_version  = EV_CURRENT;
        elf->e_entry    = 0;
        elf->e_phoff    = sizeof(struct elfhdr);
        elf->e_shoff    = 0;
        elf->e_flags    = ELF_CORE_EFLAGS;
        elf->e_ehsize   = sizeof(struct elfhdr);
        elf->e_phentsize= sizeof(struct elf_phdr);
        elf->e_phnum    = nphdr;
        elf->e_shentsize= 0;
        elf->e_shnum    = 0;
        elf->e_shstrndx = 0;

        /* setup ELF PT_NOTE program header */
        nhdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        nhdr->p_type    = PT_NOTE;
        nhdr->p_offset  = 0;
        nhdr->p_vaddr   = 0;
        nhdr->p_paddr   = 0;
        nhdr->p_filesz  = 0;
        nhdr->p_memsz   = 0;
        nhdr->p_flags   = 0;
        nhdr->p_align   = 0;

        /* setup ELF PT_LOAD program header for every area */
        list_for_each_entry(m, &kclist_head, list) {
                phdr = (struct elf_phdr *) bufp;
                bufp += sizeof(struct elf_phdr);
                offset += sizeof(struct elf_phdr);

                phdr->p_type    = PT_LOAD;
                phdr->p_flags   = PF_R|PF_W|PF_X;
                phdr->p_offset  = kc_vaddr_to_offset(m->addr) + dataoff;
                phdr->p_vaddr   = (size_t)m->addr;
                phdr->p_paddr   = 0;
                phdr->p_filesz  = phdr->p_memsz = m->size;
                phdr->p_align   = PAGE_SIZE;
        }

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */
        nhdr->p_offset  = offset;

        /* set up the process status */
        notes[0].name = CORE_STR;
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;

        memset(&prstatus, 0, sizeof(struct elf_prstatus));

        nhdr->p_filesz  = notesize(&notes[0]);
        bufp = storenote(&notes[0], bufp);

        /* set up the process info */
        notes[1].name   = CORE_STR;
        notes[1].type   = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data   = &prpsinfo;

        memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
        prpsinfo.pr_state       = 0;
        prpsinfo.pr_sname       = 'R';
        prpsinfo.pr_zomb        = 0;

        strcpy(prpsinfo.pr_fname, "vmlinux");
        strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

        nhdr->p_filesz  += notesize(&notes[1]);
        bufp = storenote(&notes[1], bufp);

        /* set up the task structure */
        notes[2].name   = CORE_STR;
        notes[2].type   = NT_TASKSTRUCT;
        notes[2].datasz = sizeof(struct task_struct);
        notes[2].data   = current;

        nhdr->p_filesz  += notesize(&notes[2]);
        bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
        int nphdr;
        unsigned long start;

        read_lock(&kclist_lock);
        size = get_kcore_size(&nphdr, &elf_buflen);

        if (buflen == 0 || *fpos >= size) {
                read_unlock(&kclist_lock);
                return 0;
        }

        /* trim buflen to not go beyond EOF */
        if (buflen > size - *fpos)
                buflen = size - *fpos;

        /* construct an ELF core header if we'll need some of it */
        if (*fpos < elf_buflen) {
                char * elf_buf;

                tsz = elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
                if (!elf_buf) {
                        read_unlock(&kclist_lock);
                        return -ENOMEM;
                }
                elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
                read_unlock(&kclist_lock);
                if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                        kfree(elf_buf);
                        return -EFAULT;
                }
                kfree(elf_buf);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if we have already filled the buffer */
                if (buflen == 0)
                        return acc;
        } else
                read_unlock(&kclist_lock);

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - elf_buflen);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

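        /*
         * Copy the remainder out one page-sized chunk at a time, so that a
         * chunk never straddles a page boundary (and thus never straddles
         * two differently-typed mappings).
         */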
        while (buflen) {
                struct kcore_list *m;

                read_lock(&kclist_lock);
                list_for_each_entry(m, &kclist_head, list) {
                        if (start >= m->addr && start < (m->addr+m->size))
                                break;
                }
                read_unlock(&kclist_lock);

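                /*
                 * If we ran off the end of the list, no segment covers this
                 * address: treat it as a hole and zero-fill the user buffer.
                 */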
                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
                        char * elf_buf;

                        elf_buf = kzalloc(tsz, GFP_KERNEL);
                        if (!elf_buf)
                                return -ENOMEM;
                        vread(elf_buf, (char *)start, tsz);
                        /* we have to zero-fill the user buffer even if nothing was read */
                        if (copy_to_user(buffer, elf_buf, tsz)) {
                                kfree(elf_buf);
                                return -EFAULT;
                        }
                        kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;

                                n = copy_to_user(buffer, (char *)start, tsz);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
                                 * we clear the destination too and hope it will
                                 * trigger the EFAULT again.
                                 */
                                if (n) {
                                        if (clear_user(buffer + tsz - n,
                                                                n))
                                                return -EFAULT;
                                }
                        } else {
                                if (clear_user(buffer, tsz))
                                        return -EFAULT;
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

        return acc;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                mutex_lock(&inode->i_mutex);
                i_size_write(inode, proc_root_kcore->size);
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}

static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
        .llseek         = default_llseek,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                write_lock(&kclist_lock);
                kcore_need_update = 1;
                write_unlock(&kclist_lock);
        }
        return NOTIFY_OK;
}
#endif

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
                printk(KERN_ERR "couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        hotplug_memory_notifier(kcore_callback, 0);

        return 0;
}
module_init(proc_kcore_init);
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/proc/kcore.c kernel ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * ELF version written by David Howells <David.Howells@nexor.co.uk>
 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 * Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/vmcore_info.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

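/*
 * Architectures where the direct mapping may need an explicit translation
 * (or may be temporarily inaccessible) can override these hooks; the
 * default is a plain __va() and a no-op.
 */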
#ifndef kc_xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
        return __va(phys);
}
#endif
#ifndef kc_unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
}
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (mem_pfn_is_ram)
                return -EBUSY;
        mem_pfn_is_ram = fn;
        return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
        if (mem_pfn_is_ram)
                return mem_pfn_is_ram(pfn);
        else
                return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        list_add_tail(&new->list, &kclist_head);
}

static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
                             size_t *data_offset)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }

        *phdrs_len = *nphdr * sizeof(struct elf_phdr);
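        /*
         * Four notes in total: prstatus, prpsinfo and the task_struct carry
         * the "CORE" name; the fourth is the VMCOREINFO note.
         */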
        *notes_len = (4 * sizeof(struct elf_note) +
                      3 * ALIGN(sizeof(CORE_STR), 4) +
                      VMCOREINFO_NOTE_NAME_BYTES +
                      ALIGN(sizeof(struct elf_prstatus), 4) +
                      ALIGN(sizeof(struct elf_prpsinfo), 4) +
                      ALIGN(arch_task_struct_size, 4) +
                      ALIGN(vmcoreinfo_size, 4));
        *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
                                  *notes_len);
        return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With highmem, we can describe the direct-mapped range [0...max_low_pfn) as
 * a single chunk, because any holes in low memory are not as big as in the
 * !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, head);
        return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = PAGE_ALIGN(end);
        /* overlap check (because we have to align pages) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;
        struct page *p;

        if (!pfn_valid(pfn))
                return 1;

        p = pfn_to_page(pfn);

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)page_to_virt(p);
        ent->size = nr_pages << PAGE_SHIFT;

        if (!virt_addr_valid((void *)ent->addr))
                goto free_out;

        /* Cut off the unmapped area (taken from the ppc32 code). */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /*
         * We've already checked virt_addr_valid, so we know this address
         * is a valid pointer; therefore we can check against it to determine
         * if we need to trim.
         */
        if (VMALLOC_START > ent->addr) {
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_ram_list(struct list_head *list)
{
        int nid, ret;
        unsigned long end_pfn;

        /* Not initialized... update now */
        /* find out the "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;
                node_end = node_end_pfn(nid);
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
        if (ret)
                return -ENOMEM;
        return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
        LIST_HEAD(list);
        LIST_HEAD(garbage);
        int nphdr;
        size_t phdrs_len, notes_len, data_offset;
        struct kcore_list *tmp, *pos;
        int ret = 0;

        down_write(&kclist_lock);
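        /*
         * Atomically consume the "needs update" flag; if another opener got
         * here first there is nothing left to do.
         */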
        if (!xchg(&kcore_need_update, 0))
                goto out;

        ret = kcore_ram_list(&list);
        if (ret) {
                /* Couldn't get the RAM list, try again next time. */
                WRITE_ONCE(kcore_need_update, 1);
                list_splice_tail(&list, &garbage);
                goto out;
        }

        list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
                        list_move(&pos->list, &garbage);
        }
        list_splice_tail(&list, &kclist_head);

        proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
                                               &data_offset);

out:
        up_write(&kclist_lock);
        list_for_each_entry_safe(pos, tmp, &garbage, list) {
                list_del(&pos->list);
                kfree(pos);
        }
        return ret;
}

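/*
 * Serialize one ELF note into the buffer at offset *i: the note header,
 * then the NUL-terminated name and the payload, each padded to a 4-byte
 * boundary as the ELF spec requires.
 */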
static void append_kcore_note(char *notes, size_t *i, const char *name,
                              unsigned int type, const void *desc,
                              size_t descsz)
{
        struct elf_note *note = (struct elf_note *)&notes[*i];

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = descsz;
        note->n_type = type;
        *i += sizeof(*note);
        memcpy(&notes[*i], name, note->n_namesz);
        *i = ALIGN(*i + note->n_namesz, 4);
        memcpy(&notes[*i], desc, descsz);
        *i = ALIGN(*i + descsz, 4);
}

static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        char *buf = file->private_data;
        loff_t *fpos = &iocb->ki_pos;
        size_t phdrs_offset, notes_offset, data_offset;
        size_t page_offline_frozen = 1;
        size_t phdrs_len, notes_len;
        struct kcore_list *m;
        size_t tsz;
        int nphdr;
        unsigned long start;
        size_t buflen = iov_iter_count(iter);
        size_t orig_buflen = buflen;
        int ret = 0;

        down_read(&kclist_lock);
        /*
         * Don't race against drivers that set PageOffline() and expect no
         * further page access.
         */
        page_offline_freeze();

        get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
        phdrs_offset = sizeof(struct elfhdr);
        notes_offset = phdrs_offset + phdrs_len;

        /* ELF file header. */
        if (buflen && *fpos < sizeof(struct elfhdr)) {
                struct elfhdr ehdr = {
                        .e_ident = {
                                [EI_MAG0] = ELFMAG0,
                                [EI_MAG1] = ELFMAG1,
                                [EI_MAG2] = ELFMAG2,
                                [EI_MAG3] = ELFMAG3,
                                [EI_CLASS] = ELF_CLASS,
                                [EI_DATA] = ELF_DATA,
                                [EI_VERSION] = EV_CURRENT,
                                [EI_OSABI] = ELF_OSABI,
                        },
                        .e_type = ET_CORE,
                        .e_machine = ELF_ARCH,
                        .e_version = EV_CURRENT,
                        .e_phoff = sizeof(struct elfhdr),
                        .e_flags = ELF_CORE_EFLAGS,
                        .e_ehsize = sizeof(struct elfhdr),
                        .e_phentsize = sizeof(struct elf_phdr),
                        .e_phnum = nphdr,
                };

                tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
                if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
                        ret = -EFAULT;
                        goto out;
                }

                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF program headers. */
        if (buflen && *fpos < phdrs_offset + phdrs_len) {
                struct elf_phdr *phdrs, *phdr;

                phdrs = kzalloc(phdrs_len, GFP_KERNEL);
                if (!phdrs) {
                        ret = -ENOMEM;
                        goto out;
                }

                phdrs[0].p_type = PT_NOTE;
                phdrs[0].p_offset = notes_offset;
                phdrs[0].p_filesz = notes_len;

                phdr = &phdrs[1];
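                /*
                 * One PT_LOAD header per kclist entry. Where the physical
                 * address is meaningful (RAM and kernel text) record it so
                 * crash-analysis tools can use it; otherwise mark it
                 * invalid with -1.
                 */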
                list_for_each_entry(m, &kclist_head, list) {
                        phdr->p_type = PT_LOAD;
                        phdr->p_flags = PF_R | PF_W | PF_X;
                        phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
                        phdr->p_vaddr = (size_t)m->addr;
                        if (m->type == KCORE_RAM)
                                phdr->p_paddr = __pa(m->addr);
                        else if (m->type == KCORE_TEXT)
                                phdr->p_paddr = __pa_symbol(m->addr);
                        else
                                phdr->p_paddr = (elf_addr_t)-1;
                        phdr->p_filesz = phdr->p_memsz = m->size;
                        phdr->p_align = PAGE_SIZE;
                        phdr++;
                }

                tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
                if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
                                 iter) != tsz) {
                        kfree(phdrs);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(phdrs);

                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF note segment. */
        if (buflen && *fpos < notes_offset + notes_len) {
                struct elf_prstatus prstatus = {};
                struct elf_prpsinfo prpsinfo = {
                        .pr_sname = 'R',
                        .pr_fname = "vmlinux",
                };
                char *notes;
                size_t i = 0;

                strscpy(prpsinfo.pr_psargs, saved_command_line,
                        sizeof(prpsinfo.pr_psargs));

                notes = kzalloc(notes_len, GFP_KERNEL);
                if (!notes) {
                        ret = -ENOMEM;
                        goto out;
                }

                append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
                                  sizeof(prstatus));
                append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
                                  sizeof(prpsinfo));
                append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
                                  arch_task_struct_size);
                /*
                 * vmcoreinfo_size is mostly constant after init time, but it
                 * can be changed by crash_save_vmcoreinfo(). Racing here with a
                 * panic on another CPU before the machine goes down is insanely
                 * unlikely, but it's better to not leave potential buffer
                 * overflows lying around, regardless.
                 */
                append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
                                  vmcoreinfo_data,
                                  min(vmcoreinfo_size, notes_len - i));

                tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
                if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
                        kfree(notes);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(notes);

                buflen -= tsz;
                *fpos += tsz;
        }

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - data_offset);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        m = NULL;
        while (buflen) {
                struct page *page;
                unsigned long pfn;
                phys_addr_t phys;
                void *__start;

                /*
                 * If this is the first iteration or the address is not within
                 * the previous entry, search for a matching entry.
                 */
                if (!m || start < m->addr || start >= m->addr + m->size) {
                        struct kcore_list *pos;

                        m = NULL;
                        list_for_each_entry(pos, &kclist_head, list) {
                                if (start >= pos->addr &&
                                    start < pos->addr + pos->size) {
                                        m = pos;
                                        break;
                                }
                        }
                }

                if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
                        page_offline_thaw();
                        cond_resched();
                        page_offline_freeze();
                }

                if (!m) {
                        if (iov_iter_zero(tsz, iter) != tsz) {
                                ret = -EFAULT;
                                goto out;
                        }
                        goto skip;
                }

                switch (m->type) {
                case KCORE_VMALLOC:
                {
                        const char *src = (char *)start;
                        size_t read = 0, left = tsz;

                        /*
                         * vmalloc uses spinlocks, so we optimistically try to
                         * read memory. If this fails, fault pages in and try
                         * again until we are done.
                         */
                        while (true) {
                                read += vread_iter(iter, src, left);
                                if (read == tsz)
                                        break;

                                src += read;
                                left -= read;

                                if (fault_in_iov_iter_writeable(iter, left)) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                        }
                        break;
                }
                case KCORE_USER:
                        /* User page is handled prior to normal kernel page: */
                        if (copy_to_iter((char *)start, tsz, iter) != tsz) {
                                ret = -EFAULT;
                                goto out;
                        }
                        break;
                case KCORE_RAM:
                        phys = __pa(start);
                        pfn = phys >> PAGE_SHIFT;
                        page = pfn_to_online_page(pfn);

                        /*
                         * Don't read offline sections, logically offline pages
                         * (e.g., inflated in a balloon), hwpoisoned pages,
                         * and explicitly excluded physical ranges.
                         */
                        if (!page || PageOffline(page) ||
                            is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
                            pfn_is_unaccepted_memory(pfn)) {
                                if (iov_iter_zero(tsz, iter) != tsz) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                                break;
                        }
                        fallthrough;
                case KCORE_VMEMMAP:
                case KCORE_TEXT:
                        if (m->type == KCORE_RAM) {
                                __start = kc_xlate_dev_mem_ptr(phys);
                                if (!__start) {
                                        ret = -ENOMEM;
                                        if (iov_iter_zero(tsz, iter) != tsz)
                                                ret = -EFAULT;
                                        goto out;
                                }
                        } else {
                                __start = (void *)start;
                        }

                        /*
                         * Sadly we must use a bounce buffer here to be able to
                         * make use of copy_from_kernel_nofault(), as these
                         * memory regions might not always be mapped on all
                         * architectures.
                         */
                        ret = copy_from_kernel_nofault(buf, __start, tsz);
                        if (m->type == KCORE_RAM)
                                kc_unxlate_dev_mem_ptr(phys, __start);
                        if (ret) {
                                if (iov_iter_zero(tsz, iter) != tsz) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                                ret = 0;
                        /*
                         * We know the bounce buffer is safe to copy from, so
                         * use _copy_to_iter() directly.
                         */
                        } else if (_copy_to_iter(buf, tsz, iter) != tsz) {
                                ret = -EFAULT;
                                goto out;
                        }
                        break;
                default:
                        pr_warn_once("Unhandled KCORE type: %d\n", m->type);
                        if (iov_iter_zero(tsz, iter) != tsz) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
skip:
                buflen -= tsz;
                *fpos += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

out:
        page_offline_thaw();
        up_read(&kclist_lock);
        if (ret)
                return ret;
        return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
        int ret = security_locked_down(LOCKDOWN_KCORE);

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        if (ret)
                return ret;

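        /* Per-open bounce buffer, used by read_kcore_iter() for nofault copies. */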
        filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!filp->private_data)
                return -ENOMEM;

        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                inode_lock(inode);
                i_size_write(inode, proc_root_kcore->size);
                inode_unlock(inode);
        }
        return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct proc_ops kcore_proc_ops = {
        .proc_read_iter = read_kcore_iter,
        .proc_open      = open_kcore,
        .proc_release   = release_kcore,
        .proc_lseek     = default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                kcore_need_update = 1;
                break;
        }
        return NOTIFY_OK;
}

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
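        /*
         * Only describe the module area separately when it really is
         * distinct; if modules live inside the vmalloc range, the
         * kcore_vmalloc entry already covers them.
         */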
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
                kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                           MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
        }
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

        return 0;
}
fs_initcall(proc_kcore_init);