fs/proc/kcore.c, as of v6.2:

// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;


#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * For the non-highmem portion, we can assume [0...max_low_pfn) is a
 * continuous range of memory, because any memory hole there is not as big
 * as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;


	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to align to page boundaries) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;

}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* Cut off any not-mapped area (adapted from ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *iter;

			m = NULL;
			list_for_each_entry(iter, &kclist_head, list) {
				if (start >= iter->addr &&
				    start < iter->addr + iter->size) {
					m = iter;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			pfn = __pa(start) >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			/*
			 * Using bounce buffer to bypass the
			 * hardened user copy kernel text checks.
			 */
			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			} else {
				if (copy_to_user(buffer, buf, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}


static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, special segment is used for mapping kernel text instead of
 * direct-map area. We need to create special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);
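
The v6.13.7 version of the same file follows. Relative to the v6.2 listing
above, the visible changes are: the read path is now iov_iter based
(read_kcore_iter() wired up through .proc_read_iter instead of .proc_read),
KCORE_VMALLOC is read with vread_iter() plus a fault-in retry loop instead of
vread() and copy_to_user(), KCORE_RAM goes through the new
kc_xlate_dev_mem_ptr()/kc_unxlate_dev_mem_ptr() hooks, reads skip unaccepted
memory via pfn_is_unaccepted_memory(), strlcpy() became strscpy(), and
<linux/crash_core.h> became <linux/vmcore_info.h>.

To make the ELF layout both versions produce concrete, here is a minimal
userspace sketch (not part of the kernel sources; it assumes a 64-bit ELF and
needs root, since open_kcore() requires CAP_SYS_RAWIO) that dumps the program
headers built by the code above:

/* kcore_phdrs.c: list the PT_NOTE/PT_LOAD headers of /proc/kcore. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdrs;
	ssize_t want;
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kcore");
		return 1;
	}
	/* The ELF header is synthesized by read_kcore()/read_kcore_iter(). */
	if (pread(fd, &ehdr, sizeof(ehdr), 0) != sizeof(ehdr)) {
		perror("pread ehdr");
		return 1;
	}
	want = (ssize_t)ehdr.e_phnum * sizeof(*phdrs);
	phdrs = malloc(want);
	if (!phdrs || pread(fd, phdrs, want, ehdr.e_phoff) != want) {
		perror("pread phdrs");
		return 1;
	}
	/* One PT_NOTE, then one PT_LOAD per kcore_list entry. */
	for (int i = 0; i < ehdr.e_phnum; i++)
		printf("phdr %2d: type %u vaddr 0x%llx offset 0x%llx size 0x%llx\n",
		       i, phdrs[i].p_type,
		       (unsigned long long)phdrs[i].p_vaddr,
		       (unsigned long long)phdrs[i].p_offset,
		       (unsigned long long)phdrs[i].p_filesz);
	free(phdrs);
	close(fd);
	return 0;
}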

fs/proc/kcore.c, as of v6.13.7:

// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/vmcore_info.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;


#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

#ifndef kc_xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
	return __va(phys);
}
#endif
#ifndef kc_unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
}
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * For the non-highmem portion, we can assume [0...max_low_pfn) is a
 * continuous range of memory, because any memory hole there is not as big
 * as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;


	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to align to page boundaries) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;

}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid((void *)ent->addr))
		goto free_out;

	/* Cut off any not-mapped area (adapted from ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	char *buf = file->private_data;
	loff_t *fpos = &iocb->ki_pos;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t buflen = iov_iter_count(iter);
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
			ret = -EFAULT;
			goto out;
		}

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
				 iter) != tsz) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strscpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;
		phys_addr_t phys;
		void *__start;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *pos;

			m = NULL;
			list_for_each_entry(pos, &kclist_head, list) {
				if (start >= pos->addr &&
				    start < pos->addr + pos->size) {
					m = pos;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
		{
			const char *src = (char *)start;
			size_t read = 0, left = tsz;

			/*
			 * vmalloc uses spinlocks, so we optimistically try to
			 * read memory. If this fails, fault pages in and try
			 * again until we are done.
			 */
			while (true) {
				read += vread_iter(iter, src, left);
				if (read == tsz)
					break;

				src += read;
				left -= read;

				if (fault_in_iov_iter_writeable(iter, left)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		}
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			phys = __pa(start);
			pfn = phys >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
			    pfn_is_unaccepted_memory(pfn)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (m->type == KCORE_RAM) {
				__start = kc_xlate_dev_mem_ptr(phys);
				if (!__start) {
					ret = -ENOMEM;
					if (iov_iter_zero(tsz, iter) != tsz)
						ret = -EFAULT;
					goto out;
				}
			} else {
				__start = (void *)start;
			}

			/*
			 * Sadly we must use a bounce buffer here to be able to
			 * make use of copy_from_kernel_nofault(), as these
			 * memory regions might not always be mapped on all
			 * architectures.
			 */
			ret = copy_from_kernel_nofault(buf, __start, tsz);
			if (m->type == KCORE_RAM)
				kc_unxlate_dev_mem_ptr(phys, __start);
			if (ret) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				ret = 0;
			/*
			 * We know the bounce buffer is safe to copy from, so
			 * use _copy_to_iter() directly.
			 */
			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read_iter	= read_kcore_iter,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}


static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, special segment is used for mapping kernel text instead of
 * direct-map area. We need to create special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);
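
For completeness, reading actual memory through either version means turning a
kernel virtual address into a file offset first; this is the userspace inverse
of the kc_offset_to_vaddr() translation used in the read loop above. A hedged
sketch follows (the helper name is illustrative, not from the kernel sources):

/* Translate a kernel virtual address to a /proc/kcore file offset by
 * scanning the PT_LOAD program headers fetched as in the sketch above. */
#include <elf.h>
#include <sys/types.h>

off_t kcore_vaddr_to_offset(const Elf64_Phdr *phdrs, unsigned int nphdr,
			    unsigned long long vaddr)
{
	for (unsigned int i = 0; i < nphdr; i++) {
		const Elf64_Phdr *p = &phdrs[i];

		if (p->p_type != PT_LOAD)
			continue;
		if (vaddr >= p->p_vaddr && vaddr < p->p_vaddr + p->p_memsz)
			return (off_t)(p->p_offset + (vaddr - p->p_vaddr));
	}
	return (off_t)-1;	/* address not covered by any segment */
}

A pread() at the returned offset then lands in the while (buflen) loop above:
the kernel looks up the matching kcore_list entry and copies the data out, or
zero-fills the buffer when the backing page is offline, hwpoisoned, or
otherwise excluded.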