fs/proc/kcore.c

v3.5.6
 
  1/*
  2 *	fs/proc/kcore.c kernel ELF core dumper
  3 *
  4 *	Modelled on fs/exec.c:aout_core_dump()
  5 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
  6 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
  7 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
  8 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
  9 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 10 */
 11
 12#include <linux/mm.h>
 13#include <linux/proc_fs.h>
 14#include <linux/user.h>
 15#include <linux/capability.h>
 16#include <linux/elf.h>
 17#include <linux/elfcore.h>
 18#include <linux/vmalloc.h>
 19#include <linux/highmem.h>
 20#include <linux/bootmem.h>
 21#include <linux/init.h>
 22#include <linux/slab.h>
 23#include <asm/uaccess.h>
 24#include <asm/io.h>
 25#include <linux/list.h>
 26#include <linux/ioport.h>
 27#include <linux/memory.h>
 28#include <asm/sections.h>
 29
 30#define CORE_STR "CORE"
 31
 32#ifndef ELF_CORE_EFLAGS
 33#define ELF_CORE_EFLAGS	0
 34#endif
 35
 36static struct proc_dir_entry *proc_root_kcore;
 37
 38
 39#ifndef kc_vaddr_to_offset
 40#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
 41#endif
 42#ifndef	kc_offset_to_vaddr
 43#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
 44#endif
 45
 46/* An ELF note in memory */
 47struct memelfnote
 48{
 49	const char *name;
 50	int type;
 51	unsigned int datasz;
 52	void *data;
 53};
 54
 55static LIST_HEAD(kclist_head);
 56static DEFINE_RWLOCK(kclist_lock);
 57static int kcore_need_update = 1;
 58
 59void
 60kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 61{
 62	new->addr = (unsigned long)addr;
 63	new->size = size;
 64	new->type = type;
 65
 66	write_lock(&kclist_lock);
 67	list_add_tail(&new->list, &kclist_head);
 68	write_unlock(&kclist_lock);
 69}
 70
 71static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
 72{
 73	size_t try, size;
 74	struct kcore_list *m;
 75
 76	*nphdr = 1; /* PT_NOTE */
 77	size = 0;
 78
 79	list_for_each_entry(m, &kclist_head, list) {
 80		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
 81		if (try > size)
 82			size = try;
 83		*nphdr = *nphdr + 1;
 84	}
 85	*elf_buflen =	sizeof(struct elfhdr) + 
 86			(*nphdr + 2)*sizeof(struct elf_phdr) + 
 87			3 * ((sizeof(struct elf_note)) +
 88			     roundup(sizeof(CORE_STR), 4)) +
 89			roundup(sizeof(struct elf_prstatus), 4) +
 90			roundup(sizeof(struct elf_prpsinfo), 4) +
 91			roundup(sizeof(struct task_struct), 4);
 92	*elf_buflen = PAGE_ALIGN(*elf_buflen);
 93	return size + *elf_buflen;
 94}
 95
 96static void free_kclist_ents(struct list_head *head)
 97{
 98	struct kcore_list *tmp, *pos;
 99
100	list_for_each_entry_safe(pos, tmp, head, list) {
101		list_del(&pos->list);
102		kfree(pos);
103	}
104}
105/*
106 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
107 */
108static void __kcore_update_ram(struct list_head *list)
109{
110	int nphdr;
111	size_t size;
112	struct kcore_list *tmp, *pos;
113	LIST_HEAD(garbage);
114
115	write_lock(&kclist_lock);
116	if (kcore_need_update) {
117		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
118			if (pos->type == KCORE_RAM
119				|| pos->type == KCORE_VMEMMAP)
120				list_move(&pos->list, &garbage);
121		}
122		list_splice_tail(list, &kclist_head);
123	} else
124		list_splice(list, &garbage);
125	kcore_need_update = 0;
126	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
127	write_unlock(&kclist_lock);
128
129	free_kclist_ents(&garbage);
130}
131
132
133#ifdef CONFIG_HIGHMEM
134/*
135 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
136 * because memory hole is not as big as !HIGHMEM case.
137 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
138 */
139static int kcore_update_ram(void)
140{
141	LIST_HEAD(head);
142	struct kcore_list *ent;
143	int ret = 0;
144
145	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
146	if (!ent)
147		return -ENOMEM;
148	ent->addr = (unsigned long)__va(0);
149	ent->size = max_low_pfn << PAGE_SHIFT;
150	ent->type = KCORE_RAM;
151	list_add(&ent->list, &head);
152	__kcore_update_ram(&head);
153	return ret;
154}
155
156#else /* !CONFIG_HIGHMEM */
157
158#ifdef CONFIG_SPARSEMEM_VMEMMAP
159/* calculate vmemmap's address from given system ram pfn and register it */
160static int
161get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
162{
163	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
164	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
165	unsigned long start, end;
166	struct kcore_list *vmm, *tmp;
167
168
169	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
170	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
171	end = ALIGN(end, PAGE_SIZE);
172	/* overlap check (because we have to page-align the range) */
173	list_for_each_entry(tmp, head, list) {
174		if (tmp->type != KCORE_VMEMMAP)
175			continue;
176		if (start < tmp->addr + tmp->size)
177			if (end > tmp->addr)
178				end = tmp->addr;
179	}
180	if (start < end) {
181		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
182		if (!vmm)
183			return 0;
184		vmm->addr = start;
185		vmm->size = end - start;
186		vmm->type = KCORE_VMEMMAP;
187		list_add_tail(&vmm->list, head);
188	}
189	return 1;
190
191}
192#else
193static int
194get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
195{
196	return 1;
197}
198
199#endif
200
201static int
202kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
203{
204	struct list_head *head = (struct list_head *)arg;
205	struct kcore_list *ent;
206
207	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
208	if (!ent)
209		return -ENOMEM;
210	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
211	ent->size = nr_pages << PAGE_SHIFT;
212
213	/* Sanity check: Can happen in 32bit arch...maybe */
214	if (ent->addr < (unsigned long) __va(0))
215		goto free_out;
216
217	/* cut not-mapped area. ....from ppc-32 code. */
218	if (ULONG_MAX - ent->addr < ent->size)
219		ent->size = ULONG_MAX - ent->addr;
220
221	/* cut when vmalloc() area is higher than direct-map area */
222	if (VMALLOC_START > (unsigned long)__va(0)) {
223		if (ent->addr > VMALLOC_START)
224			goto free_out;
225		if (VMALLOC_START - ent->addr < ent->size)
226			ent->size = VMALLOC_START - ent->addr;
227	}
228
229	ent->type = KCORE_RAM;
230	list_add_tail(&ent->list, head);
231
232	if (!get_sparsemem_vmemmap_info(ent, head)) {
233		list_del(&ent->list);
234		goto free_out;
235	}
236
237	return 0;
238free_out:
239	kfree(ent);
240	return 1;
241}
242
243static int kcore_update_ram(void)
244{
245	int nid, ret;
246	unsigned long end_pfn;
247	LIST_HEAD(head);
248
249	/* Not initialized....update now */
250	/* find out "max pfn" */
251	end_pfn = 0;
252	for_each_node_state(nid, N_HIGH_MEMORY) {
253		unsigned long node_end;
254		node_end  = NODE_DATA(nid)->node_start_pfn +
255			NODE_DATA(nid)->node_spanned_pages;
256		if (end_pfn < node_end)
257			end_pfn = node_end;
258	}
259	/* scan 0 to max_pfn */
260	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
261	if (ret) {
262		free_kclist_ents(&head);
263		return -ENOMEM;
264	}
265	__kcore_update_ram(&head);
266	return ret;
267}
268#endif /* CONFIG_HIGHMEM */
269
270/*****************************************************************************/
271/*
272 * determine size of ELF note
273 */
274static int notesize(struct memelfnote *en)
275{
276	int sz;
277
278	sz = sizeof(struct elf_note);
279	sz += roundup((strlen(en->name) + 1), 4);
280	sz += roundup(en->datasz, 4);
281
282	return sz;
283} /* end notesize() */
284
285/*****************************************************************************/
286/*
287 * store a note in the header buffer
288 */
289static char *storenote(struct memelfnote *men, char *bufp)
290{
291	struct elf_note en;
292
293#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)
294
295	en.n_namesz = strlen(men->name) + 1;
296	en.n_descsz = men->datasz;
297	en.n_type = men->type;
298
299	DUMP_WRITE(&en, sizeof(en));
300	DUMP_WRITE(men->name, en.n_namesz);
301
302	/* XXX - cast from long long to long to avoid need for libgcc.a */
303	bufp = (char*) roundup((unsigned long)bufp,4);
304	DUMP_WRITE(men->data, men->datasz);
305	bufp = (char*) roundup((unsigned long)bufp,4);
306
307#undef DUMP_WRITE
308
309	return bufp;
310} /* end storenote() */
311
312/*
313 * store an ELF coredump header in the supplied buffer
314 * nphdr is the number of elf_phdr to insert
315 */
316static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
317{
318	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
319	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
320	struct elf_phdr *nhdr, *phdr;
321	struct elfhdr *elf;
322	struct memelfnote notes[3];
323	off_t offset = 0;
324	struct kcore_list *m;
325
326	/* setup ELF header */
327	elf = (struct elfhdr *) bufp;
328	bufp += sizeof(struct elfhdr);
329	offset += sizeof(struct elfhdr);
330	memcpy(elf->e_ident, ELFMAG, SELFMAG);
331	elf->e_ident[EI_CLASS]	= ELF_CLASS;
332	elf->e_ident[EI_DATA]	= ELF_DATA;
333	elf->e_ident[EI_VERSION]= EV_CURRENT;
334	elf->e_ident[EI_OSABI] = ELF_OSABI;
335	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
336	elf->e_type	= ET_CORE;
337	elf->e_machine	= ELF_ARCH;
338	elf->e_version	= EV_CURRENT;
339	elf->e_entry	= 0;
340	elf->e_phoff	= sizeof(struct elfhdr);
341	elf->e_shoff	= 0;
342	elf->e_flags	= ELF_CORE_EFLAGS;
343	elf->e_ehsize	= sizeof(struct elfhdr);
344	elf->e_phentsize= sizeof(struct elf_phdr);
345	elf->e_phnum	= nphdr;
346	elf->e_shentsize= 0;
347	elf->e_shnum	= 0;
348	elf->e_shstrndx	= 0;
349
350	/* setup ELF PT_NOTE program header */
351	nhdr = (struct elf_phdr *) bufp;
352	bufp += sizeof(struct elf_phdr);
353	offset += sizeof(struct elf_phdr);
354	nhdr->p_type	= PT_NOTE;
355	nhdr->p_offset	= 0;
356	nhdr->p_vaddr	= 0;
357	nhdr->p_paddr	= 0;
358	nhdr->p_filesz	= 0;
359	nhdr->p_memsz	= 0;
360	nhdr->p_flags	= 0;
361	nhdr->p_align	= 0;
362
363	/* setup ELF PT_LOAD program header for every area */
364	list_for_each_entry(m, &kclist_head, list) {
365		phdr = (struct elf_phdr *) bufp;
366		bufp += sizeof(struct elf_phdr);
367		offset += sizeof(struct elf_phdr);
368
369		phdr->p_type	= PT_LOAD;
370		phdr->p_flags	= PF_R|PF_W|PF_X;
371		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
372		phdr->p_vaddr	= (size_t)m->addr;
373		phdr->p_paddr	= 0;
374		phdr->p_filesz	= phdr->p_memsz	= m->size;
375		phdr->p_align	= PAGE_SIZE;
376	}
377
378	/*
379	 * Set up the notes in similar form to SVR4 core dumps made
380	 * with info from their /proc.
381	 */
382	nhdr->p_offset	= offset;
383
384	/* set up the process status */
385	notes[0].name = CORE_STR;
386	notes[0].type = NT_PRSTATUS;
387	notes[0].datasz = sizeof(struct elf_prstatus);
388	notes[0].data = &prstatus;
389
390	memset(&prstatus, 0, sizeof(struct elf_prstatus));
391
392	nhdr->p_filesz	= notesize(&notes[0]);
393	bufp = storenote(&notes[0], bufp);
394
395	/* set up the process info */
396	notes[1].name	= CORE_STR;
397	notes[1].type	= NT_PRPSINFO;
398	notes[1].datasz	= sizeof(struct elf_prpsinfo);
399	notes[1].data	= &prpsinfo;
400
401	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
402	prpsinfo.pr_state	= 0;
403	prpsinfo.pr_sname	= 'R';
404	prpsinfo.pr_zomb	= 0;
405
406	strcpy(prpsinfo.pr_fname, "vmlinux");
407	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);
408
409	nhdr->p_filesz	+= notesize(&notes[1]);
410	bufp = storenote(&notes[1], bufp);
411
412	/* set up the task structure */
413	notes[2].name	= CORE_STR;
414	notes[2].type	= NT_TASKSTRUCT;
415	notes[2].datasz	= sizeof(struct task_struct);
416	notes[2].data	= current;
417
418	nhdr->p_filesz	+= notesize(&notes[2]);
419	bufp = storenote(&notes[2], bufp);
420
421} /* end elf_kcore_store_hdr() */
422
423/*****************************************************************************/
424/*
425 * read from the ELF header and then kernel memory
426 */
427static ssize_t
428read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
429{
430	ssize_t acc = 0;
431	size_t size, tsz;
432	size_t elf_buflen;
433	int nphdr;
434	unsigned long start;
435
436	read_lock(&kclist_lock);
437	size = get_kcore_size(&nphdr, &elf_buflen);
438
439	if (buflen == 0 || *fpos >= size) {
440		read_unlock(&kclist_lock);
441		return 0;
442	}
443
444	/* trim buflen to not go beyond EOF */
445	if (buflen > size - *fpos)
446		buflen = size - *fpos;
447
448	/* construct an ELF core header if we'll need some of it */
449	if (*fpos < elf_buflen) {
450		char * elf_buf;
451
452		tsz = elf_buflen - *fpos;
453		if (buflen < tsz)
454			tsz = buflen;
455		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
456		if (!elf_buf) {
457			read_unlock(&kclist_lock);
458			return -ENOMEM;
459		}
460		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
461		read_unlock(&kclist_lock);
462		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
463			kfree(elf_buf);
464			return -EFAULT;
465		}
466		kfree(elf_buf);
467		buflen -= tsz;
468		*fpos += tsz;
469		buffer += tsz;
470		acc += tsz;
471
472		/* leave now if filled buffer already */
473		if (buflen == 0)
474			return acc;
475	} else
476		read_unlock(&kclist_lock);
477
478	/*
479	 * Check to see if our file offset matches with any of
480	 * the addresses in the elf_phdr on our list.
481	 */
482	start = kc_offset_to_vaddr(*fpos - elf_buflen);
483	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
484		tsz = buflen;
485		
486	while (buflen) {
487		struct kcore_list *m;
488
489		read_lock(&kclist_lock);
490		list_for_each_entry(m, &kclist_head, list) {
491			if (start >= m->addr && start < (m->addr+m->size))
492				break;
493		}
494		read_unlock(&kclist_lock);
495
496		if (&m->list == &kclist_head) {
497			if (clear_user(buffer, tsz))
498				return -EFAULT;
499		} else if (is_vmalloc_or_module_addr((void *)start)) {
500			char * elf_buf;
501
502			elf_buf = kzalloc(tsz, GFP_KERNEL);
503			if (!elf_buf)
504				return -ENOMEM;
505			vread(elf_buf, (char *)start, tsz);
506			/* we have to zero-fill user buffer even if no read */
507			if (copy_to_user(buffer, elf_buf, tsz)) {
508				kfree(elf_buf);
509				return -EFAULT;
510			}
511			kfree(elf_buf);
512		} else {
513			if (kern_addr_valid(start)) {
514				unsigned long n;
515
516				n = copy_to_user(buffer, (char *)start, tsz);
517				/*
518				 * We cannot distinguish between fault on source
519				 * and fault on destination. When this happens
520				 * we clear too and hope it will trigger the
521				 * EFAULT again.
522				 */
523				if (n) { 
524					if (clear_user(buffer + tsz - n,
525								n))
526						return -EFAULT;
527				}
528			} else {
529				if (clear_user(buffer, tsz))
530					return -EFAULT;
531			}
532		}
533		buflen -= tsz;
534		*fpos += tsz;
535		buffer += tsz;
536		acc += tsz;
537		start += tsz;
538		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
539	}
540
541	return acc;
542}
543
544
545static int open_kcore(struct inode *inode, struct file *filp)
546{
547	if (!capable(CAP_SYS_RAWIO))
548		return -EPERM;
549	if (kcore_need_update)
550		kcore_update_ram();
551	if (i_size_read(inode) != proc_root_kcore->size) {
552		mutex_lock(&inode->i_mutex);
553		i_size_write(inode, proc_root_kcore->size);
554		mutex_unlock(&inode->i_mutex);
555	}
556	return 0;
557}
558
559
560static const struct file_operations proc_kcore_operations = {
561	.read		= read_kcore,
562	.open		= open_kcore,
563	.llseek		= default_llseek,
564};
565
566#ifdef CONFIG_MEMORY_HOTPLUG
567/* just remember that we have to update kcore */
568static int __meminit kcore_callback(struct notifier_block *self,
569				    unsigned long action, void *arg)
570{
571	switch (action) {
572	case MEM_ONLINE:
573	case MEM_OFFLINE:
574		write_lock(&kclist_lock);
575		kcore_need_update = 1;
576		write_unlock(&kclist_lock);
577	}
578	return NOTIFY_OK;
579}
580#endif
581
582
583static struct kcore_list kcore_vmalloc;
584
585#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
586static struct kcore_list kcore_text;
587/*
588 * If defined, special segment is used for mapping kernel text instead of
589 * direct-map area. We need to create special TEXT section.
590 */
591static void __init proc_kcore_text_init(void)
592{
593	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
594}
595#else
596static void __init proc_kcore_text_init(void)
597{
598}
599#endif
600
601#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
602/*
603 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
604 */
605struct kcore_list kcore_modules;
606static void __init add_modules_range(void)
607{
608	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
609			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
610}
611#else
612static void __init add_modules_range(void)
613{
614}
615#endif
616
617static int __init proc_kcore_init(void)
618{
619	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
620				      &proc_kcore_operations);
621	if (!proc_root_kcore) {
622		printk(KERN_ERR "couldn't create /proc/kcore\n");
623		return 0; /* Always returns 0. */
624	}
625	/* Store text area if it's special */
626	proc_kcore_text_init();
627	/* Store vmalloc area */
628	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
629		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
630	add_modules_range();
631	/* Store direct-map area from physical memory map */
632	kcore_update_ram();
633	hotplug_memory_notifier(kcore_callback, 0);
634
635	return 0;
636}
637module_init(proc_kcore_init);
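
Everything past the header area of /proc/kcore is ordinary kernel memory, but the first elf_buflen bytes are a synthesized ELF core image: the ELF header, one PT_NOTE program header, one PT_LOAD program header per kcore_list entry, and the CORE notes (prstatus, prpsinfo and the task_struct) built by elf_kcore_store_hdr(); offsets past that area map to kernel virtual addresses through kc_offset_to_vaddr(). The sketch below is an illustrative userspace consumer, not part of the kernel source: it reads the ELF file header and prints the program header count (the nphdr value computed in get_kcore_size()). It assumes a 64-bit kernel (hence Elf64_Ehdr) and a caller with CAP_SYS_RAWIO, which open_kcore() enforces.

/*
 * Illustrative /proc/kcore reader (userspace, not part of kcore.c).
 * Prints fields of the ELF header that elf_kcore_store_hdr() synthesizes.
 * Build with a C compiler and run as root (CAP_SYS_RAWIO is required).
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;	/* assumes a 64-bit kernel; use Elf32_Ehdr on 32-bit */
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kcore");
		return EXIT_FAILURE;
	}
	if (read(fd, &ehdr, sizeof(ehdr)) != (ssize_t)sizeof(ehdr)) {
		perror("read ELF header");
		close(fd);
		return EXIT_FAILURE;
	}
	/* e_type is ET_CORE; e_phnum is the nphdr counted in get_kcore_size() */
	printf("type=%u machine=%u phnum=%u phoff=%llu\n",
	       (unsigned)ehdr.e_type, (unsigned)ehdr.e_machine,
	       (unsigned)ehdr.e_phnum, (unsigned long long)ehdr.e_phoff);
	close(fd);
	return EXIT_SUCCESS;
}

Debuggers that understand core files (gdb with vmlinux, or the crash utility) consume this same layout when they open /proc/kcore.
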
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	fs/proc/kcore.c kernel ELF core dumper
  4 *
  5 *	Modelled on fs/exec.c:aout_core_dump()
  6 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
  7 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
  8 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
  9 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 10 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 11 */
 12
 13#include <linux/mm.h>
 14#include <linux/proc_fs.h>
 15#include <linux/kcore.h>
 16#include <linux/user.h>
 17#include <linux/capability.h>
 18#include <linux/elf.h>
 19#include <linux/elfcore.h>
 20#include <linux/notifier.h>
 21#include <linux/vmalloc.h>
 22#include <linux/highmem.h>
 23#include <linux/printk.h>
 24#include <linux/bootmem.h>
 25#include <linux/init.h>
 26#include <linux/slab.h>
 27#include <linux/uaccess.h>
 28#include <asm/io.h>
 29#include <linux/list.h>
 30#include <linux/ioport.h>
 31#include <linux/memory.h>
 32#include <linux/sched/task.h>
 33#include <asm/sections.h>
 34#include "internal.h"
 35
 36#define CORE_STR "CORE"
 37
 38#ifndef ELF_CORE_EFLAGS
 39#define ELF_CORE_EFLAGS	0
 40#endif
 41
 42static struct proc_dir_entry *proc_root_kcore;
 43
 44
 45#ifndef kc_vaddr_to_offset
 46#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
 47#endif
 48#ifndef	kc_offset_to_vaddr
 49#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
 50#endif
 51
 52/* An ELF note in memory */
 53struct memelfnote
 54{
 55	const char *name;
 56	int type;
 57	unsigned int datasz;
 58	void *data;
 59};
 60
 61static LIST_HEAD(kclist_head);
 62static DEFINE_RWLOCK(kclist_lock);
 63static int kcore_need_update = 1;
 64
 65void
 66kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 67{
 68	new->addr = (unsigned long)addr;
 69	new->size = size;
 70	new->type = type;
 71
 72	write_lock(&kclist_lock);
 73	list_add_tail(&new->list, &kclist_head);
 74	write_unlock(&kclist_lock);
 75}
 76
 77static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
 78{
 79	size_t try, size;
 80	struct kcore_list *m;
 81
 82	*nphdr = 1; /* PT_NOTE */
 83	size = 0;
 84
 85	list_for_each_entry(m, &kclist_head, list) {
 86		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
 87		if (try > size)
 88			size = try;
 89		*nphdr = *nphdr + 1;
 90	}
 91	*elf_buflen =	sizeof(struct elfhdr) + 
 92			(*nphdr + 2)*sizeof(struct elf_phdr) + 
 93			3 * ((sizeof(struct elf_note)) +
 94			     roundup(sizeof(CORE_STR), 4)) +
 95			roundup(sizeof(struct elf_prstatus), 4) +
 96			roundup(sizeof(struct elf_prpsinfo), 4) +
 97			roundup(arch_task_struct_size, 4);
 98	*elf_buflen = PAGE_ALIGN(*elf_buflen);
 99	return size + *elf_buflen;
100}
101
102static void free_kclist_ents(struct list_head *head)
103{
104	struct kcore_list *tmp, *pos;
105
106	list_for_each_entry_safe(pos, tmp, head, list) {
107		list_del(&pos->list);
108		kfree(pos);
109	}
110}
111/*
112 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
113 */
114static void __kcore_update_ram(struct list_head *list)
115{
116	int nphdr;
117	size_t size;
118	struct kcore_list *tmp, *pos;
119	LIST_HEAD(garbage);
120
121	write_lock(&kclist_lock);
122	if (kcore_need_update) {
123		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
124			if (pos->type == KCORE_RAM
125				|| pos->type == KCORE_VMEMMAP)
126				list_move(&pos->list, &garbage);
127		}
128		list_splice_tail(list, &kclist_head);
129	} else
130		list_splice(list, &garbage);
131	kcore_need_update = 0;
132	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
133	write_unlock(&kclist_lock);
134
135	free_kclist_ents(&garbage);
136}
137
138
139#ifdef CONFIG_HIGHMEM
140/*
141 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
142 * because memory hole is not as big as !HIGHMEM case.
143 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
144 */
145static int kcore_update_ram(void)
146{
147	LIST_HEAD(head);
148	struct kcore_list *ent;
149	int ret = 0;
150
151	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
152	if (!ent)
153		return -ENOMEM;
154	ent->addr = (unsigned long)__va(0);
155	ent->size = max_low_pfn << PAGE_SHIFT;
156	ent->type = KCORE_RAM;
157	list_add(&ent->list, &head);
158	__kcore_update_ram(&head);
159	return ret;
160}
161
162#else /* !CONFIG_HIGHMEM */
163
164#ifdef CONFIG_SPARSEMEM_VMEMMAP
165/* calculate vmemmap's address from given system ram pfn and register it */
166static int
167get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
168{
169	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
170	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
171	unsigned long start, end;
172	struct kcore_list *vmm, *tmp;
173
174
175	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
176	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
177	end = PAGE_ALIGN(end);
178	/* overlap check (because we have to page-align the range) */
179	list_for_each_entry(tmp, head, list) {
180		if (tmp->type != KCORE_VMEMMAP)
181			continue;
182		if (start < tmp->addr + tmp->size)
183			if (end > tmp->addr)
184				end = tmp->addr;
185	}
186	if (start < end) {
187		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
188		if (!vmm)
189			return 0;
190		vmm->addr = start;
191		vmm->size = end - start;
192		vmm->type = KCORE_VMEMMAP;
193		list_add_tail(&vmm->list, head);
194	}
195	return 1;
196
197}
198#else
199static int
200get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
201{
202	return 1;
203}
204
205#endif
206
207static int
208kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
209{
210	struct list_head *head = (struct list_head *)arg;
211	struct kcore_list *ent;
212	struct page *p;
213
214	if (!pfn_valid(pfn))
215		return 1;
216
217	p = pfn_to_page(pfn);
218	if (!memmap_valid_within(pfn, p, page_zone(p)))
219		return 1;
220
221	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
222	if (!ent)
223		return -ENOMEM;
224	ent->addr = (unsigned long)page_to_virt(p);
225	ent->size = nr_pages << PAGE_SHIFT;
226
227	if (!virt_addr_valid(ent->addr))
228		goto free_out;
229
230	/* cut not-mapped area. ....from ppc-32 code. */
231	if (ULONG_MAX - ent->addr < ent->size)
232		ent->size = ULONG_MAX - ent->addr;
233
234	/*
235	 * We've already checked virt_addr_valid so we know this address
236	 * is a valid pointer, therefore we can check against it to determine
237	 * if we need to trim
238	 */
239	if (VMALLOC_START > ent->addr) {
240		if (VMALLOC_START - ent->addr < ent->size)
241			ent->size = VMALLOC_START - ent->addr;
242	}
243
244	ent->type = KCORE_RAM;
245	list_add_tail(&ent->list, head);
246
247	if (!get_sparsemem_vmemmap_info(ent, head)) {
248		list_del(&ent->list);
249		goto free_out;
250	}
251
252	return 0;
253free_out:
254	kfree(ent);
255	return 1;
256}
257
258static int kcore_update_ram(void)
259{
260	int nid, ret;
261	unsigned long end_pfn;
262	LIST_HEAD(head);
263
264	/* Not initialized....update now */
265	/* find out "max pfn" */
266	end_pfn = 0;
267	for_each_node_state(nid, N_MEMORY) {
268		unsigned long node_end;
269		node_end = node_end_pfn(nid);
270		if (end_pfn < node_end)
271			end_pfn = node_end;
272	}
273	/* scan 0 to max_pfn */
274	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
275	if (ret) {
276		free_kclist_ents(&head);
277		return -ENOMEM;
278	}
279	__kcore_update_ram(&head);
280	return ret;
281}
282#endif /* CONFIG_HIGHMEM */
283
284/*****************************************************************************/
285/*
286 * determine size of ELF note
287 */
288static int notesize(struct memelfnote *en)
289{
290	int sz;
291
292	sz = sizeof(struct elf_note);
293	sz += roundup((strlen(en->name) + 1), 4);
294	sz += roundup(en->datasz, 4);
295
296	return sz;
297} /* end notesize() */
298
299/*****************************************************************************/
300/*
301 * store a note in the header buffer
302 */
303static char *storenote(struct memelfnote *men, char *bufp)
304{
305	struct elf_note en;
306
307#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)
308
309	en.n_namesz = strlen(men->name) + 1;
310	en.n_descsz = men->datasz;
311	en.n_type = men->type;
312
313	DUMP_WRITE(&en, sizeof(en));
314	DUMP_WRITE(men->name, en.n_namesz);
315
316	/* XXX - cast from long long to long to avoid need for libgcc.a */
317	bufp = (char*) roundup((unsigned long)bufp,4);
318	DUMP_WRITE(men->data, men->datasz);
319	bufp = (char*) roundup((unsigned long)bufp,4);
320
321#undef DUMP_WRITE
322
323	return bufp;
324} /* end storenote() */
325
326/*
327 * store an ELF coredump header in the supplied buffer
328 * nphdr is the number of elf_phdr to insert
329 */
330static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
331{
332	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
333	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
334	struct elf_phdr *nhdr, *phdr;
335	struct elfhdr *elf;
336	struct memelfnote notes[3];
337	off_t offset = 0;
338	struct kcore_list *m;
339
340	/* setup ELF header */
341	elf = (struct elfhdr *) bufp;
342	bufp += sizeof(struct elfhdr);
343	offset += sizeof(struct elfhdr);
344	memcpy(elf->e_ident, ELFMAG, SELFMAG);
345	elf->e_ident[EI_CLASS]	= ELF_CLASS;
346	elf->e_ident[EI_DATA]	= ELF_DATA;
347	elf->e_ident[EI_VERSION]= EV_CURRENT;
348	elf->e_ident[EI_OSABI] = ELF_OSABI;
349	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
350	elf->e_type	= ET_CORE;
351	elf->e_machine	= ELF_ARCH;
352	elf->e_version	= EV_CURRENT;
353	elf->e_entry	= 0;
354	elf->e_phoff	= sizeof(struct elfhdr);
355	elf->e_shoff	= 0;
356	elf->e_flags	= ELF_CORE_EFLAGS;
357	elf->e_ehsize	= sizeof(struct elfhdr);
358	elf->e_phentsize= sizeof(struct elf_phdr);
359	elf->e_phnum	= nphdr;
360	elf->e_shentsize= 0;
361	elf->e_shnum	= 0;
362	elf->e_shstrndx	= 0;
363
364	/* setup ELF PT_NOTE program header */
365	nhdr = (struct elf_phdr *) bufp;
366	bufp += sizeof(struct elf_phdr);
367	offset += sizeof(struct elf_phdr);
368	nhdr->p_type	= PT_NOTE;
369	nhdr->p_offset	= 0;
370	nhdr->p_vaddr	= 0;
371	nhdr->p_paddr	= 0;
372	nhdr->p_filesz	= 0;
373	nhdr->p_memsz	= 0;
374	nhdr->p_flags	= 0;
375	nhdr->p_align	= 0;
376
377	/* setup ELF PT_LOAD program header for every area */
378	list_for_each_entry(m, &kclist_head, list) {
379		phdr = (struct elf_phdr *) bufp;
380		bufp += sizeof(struct elf_phdr);
381		offset += sizeof(struct elf_phdr);
382
383		phdr->p_type	= PT_LOAD;
384		phdr->p_flags	= PF_R|PF_W|PF_X;
385		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
386		phdr->p_vaddr	= (size_t)m->addr;
387		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
388			phdr->p_paddr	= __pa(m->addr);
389		else
390			phdr->p_paddr	= (elf_addr_t)-1;
391		phdr->p_filesz	= phdr->p_memsz	= m->size;
392		phdr->p_align	= PAGE_SIZE;
393	}
394
395	/*
396	 * Set up the notes in similar form to SVR4 core dumps made
397	 * with info from their /proc.
398	 */
399	nhdr->p_offset	= offset;
400
401	/* set up the process status */
402	notes[0].name = CORE_STR;
403	notes[0].type = NT_PRSTATUS;
404	notes[0].datasz = sizeof(struct elf_prstatus);
405	notes[0].data = &prstatus;
406
407	memset(&prstatus, 0, sizeof(struct elf_prstatus));
408
409	nhdr->p_filesz	= notesize(&notes[0]);
410	bufp = storenote(&notes[0], bufp);
411
412	/* set up the process info */
413	notes[1].name	= CORE_STR;
414	notes[1].type	= NT_PRPSINFO;
415	notes[1].datasz	= sizeof(struct elf_prpsinfo);
416	notes[1].data	= &prpsinfo;
417
418	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
419	prpsinfo.pr_state	= 0;
420	prpsinfo.pr_sname	= 'R';
421	prpsinfo.pr_zomb	= 0;
422
423	strcpy(prpsinfo.pr_fname, "vmlinux");
424	strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs));
425
426	nhdr->p_filesz	+= notesize(&notes[1]);
427	bufp = storenote(&notes[1], bufp);
428
429	/* set up the task structure */
430	notes[2].name	= CORE_STR;
431	notes[2].type	= NT_TASKSTRUCT;
432	notes[2].datasz	= arch_task_struct_size;
433	notes[2].data	= current;
434
435	nhdr->p_filesz	+= notesize(&notes[2]);
436	bufp = storenote(&notes[2], bufp);
437
438} /* end elf_kcore_store_hdr() */
439
440/*****************************************************************************/
441/*
442 * read from the ELF header and then kernel memory
443 */
444static ssize_t
445read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
446{
447	char *buf = file->private_data;
448	ssize_t acc = 0;
449	size_t size, tsz;
450	size_t elf_buflen;
451	int nphdr;
452	unsigned long start;
453
454	read_lock(&kclist_lock);
455	size = get_kcore_size(&nphdr, &elf_buflen);
456
457	if (buflen == 0 || *fpos >= size) {
458		read_unlock(&kclist_lock);
459		return 0;
460	}
461
462	/* trim buflen to not go beyond EOF */
463	if (buflen > size - *fpos)
464		buflen = size - *fpos;
465
466	/* construct an ELF core header if we'll need some of it */
467	if (*fpos < elf_buflen) {
468		char * elf_buf;
469
470		tsz = elf_buflen - *fpos;
471		if (buflen < tsz)
472			tsz = buflen;
473		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
474		if (!elf_buf) {
475			read_unlock(&kclist_lock);
476			return -ENOMEM;
477		}
478		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
479		read_unlock(&kclist_lock);
480		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
481			kfree(elf_buf);
482			return -EFAULT;
483		}
484		kfree(elf_buf);
485		buflen -= tsz;
486		*fpos += tsz;
487		buffer += tsz;
488		acc += tsz;
489
490		/* leave now if filled buffer already */
491		if (buflen == 0)
492			return acc;
493	} else
494		read_unlock(&kclist_lock);
495
496	/*
497	 * Check to see if our file offset matches with any of
498	 * the addresses in the elf_phdr on our list.
499	 */
500	start = kc_offset_to_vaddr(*fpos - elf_buflen);
501	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
502		tsz = buflen;
503		
504	while (buflen) {
505		struct kcore_list *m;
506
507		read_lock(&kclist_lock);
508		list_for_each_entry(m, &kclist_head, list) {
509			if (start >= m->addr && start < (m->addr+m->size))
510				break;
511		}
512		read_unlock(&kclist_lock);
513
514		if (&m->list == &kclist_head) {
515			if (clear_user(buffer, tsz))
516				return -EFAULT;
517		} else if (m->type == KCORE_VMALLOC) {
518			vread(buf, (char *)start, tsz);
519			/* we have to zero-fill user buffer even if no read */
520			if (copy_to_user(buffer, buf, tsz))
521				return -EFAULT;
522		} else if (m->type == KCORE_USER) {
523			/* User page is handled prior to normal kernel page: */
524			if (copy_to_user(buffer, (char *)start, tsz))
525				return -EFAULT;
526		} else {
527			if (kern_addr_valid(start)) {
528				/*
529				 * Using bounce buffer to bypass the
530				 * hardened user copy kernel text checks.
531				 */
532				if (probe_kernel_read(buf, (void *) start, tsz)) {
533					if (clear_user(buffer, tsz))
534						return -EFAULT;
535				} else {
536					if (copy_to_user(buffer, buf, tsz))
537						return -EFAULT;
538				}
539			} else {
540				if (clear_user(buffer, tsz))
541					return -EFAULT;
542			}
543		}
544		buflen -= tsz;
545		*fpos += tsz;
546		buffer += tsz;
547		acc += tsz;
548		start += tsz;
549		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
550	}
551
552	return acc;
553}
554
555
556static int open_kcore(struct inode *inode, struct file *filp)
557{
558	if (!capable(CAP_SYS_RAWIO))
559		return -EPERM;
560
561	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
562	if (!filp->private_data)
563		return -ENOMEM;
564
565	if (kcore_need_update)
566		kcore_update_ram();
567	if (i_size_read(inode) != proc_root_kcore->size) {
568		inode_lock(inode);
569		i_size_write(inode, proc_root_kcore->size);
570		inode_unlock(inode);
571	}
572	return 0;
573}
574
575static int release_kcore(struct inode *inode, struct file *file)
576{
577	kfree(file->private_data);
578	return 0;
579}
580
581static const struct file_operations proc_kcore_operations = {
582	.read		= read_kcore,
583	.open		= open_kcore,
584	.release	= release_kcore,
585	.llseek		= default_llseek,
586};
587
588/* just remember that we have to update kcore */
589static int __meminit kcore_callback(struct notifier_block *self,
590				    unsigned long action, void *arg)
591{
592	switch (action) {
593	case MEM_ONLINE:
594	case MEM_OFFLINE:
595		write_lock(&kclist_lock);
596		kcore_need_update = 1;
597		write_unlock(&kclist_lock);
598	}
599	return NOTIFY_OK;
600}
601
602static struct notifier_block kcore_callback_nb __meminitdata = {
603	.notifier_call = kcore_callback,
604	.priority = 0,
605};
606
607static struct kcore_list kcore_vmalloc;
608
609#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
610static struct kcore_list kcore_text;
611/*
612 * If defined, special segment is used for mapping kernel text instead of
613 * direct-map area. We need to create special TEXT section.
614 */
615static void __init proc_kcore_text_init(void)
616{
617	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
618}
619#else
620static void __init proc_kcore_text_init(void)
621{
622}
623#endif
624
625#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
626/*
627 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
628 */
629struct kcore_list kcore_modules;
630static void __init add_modules_range(void)
631{
632	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
633		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
634			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
635	}
636}
637#else
638static void __init add_modules_range(void)
639{
640}
641#endif
642
643static int __init proc_kcore_init(void)
644{
645	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
646				      &proc_kcore_operations);
647	if (!proc_root_kcore) {
648		pr_err("couldn't create /proc/kcore\n");
649		return 0; /* Always returns 0. */
650	}
651	/* Store text area if it's special */
652	proc_kcore_text_init();
653	/* Store vmalloc area */
654	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
655		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
656	add_modules_range();
657	/* Store direct-map area from physical memory map */
658	kcore_update_ram();
659	register_hotmemory_notifier(&kcore_callback_nb);
660
661	return 0;
662}
663fs_initcall(proc_kcore_init);
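
In both versions, each PT_LOAD header records the kernel virtual address of an area in p_vaddr and its position in the file in p_offset (kc_vaddr_to_offset(m->addr) + dataoff), so a consumer can translate a covered kernel virtual address into a file offset by scanning the program headers. The sketch below is illustrative only and not part of the kernel source; it assumes a 64-bit kernel and CAP_SYS_RAWIO, and takes the address to look up on the command line (a symbol address from /proc/kallsyms is a convenient input).

/*
 * Illustrative sketch: find the PT_LOAD segment of /proc/kcore that covers
 * a given kernel virtual address and print the matching file offset.
 * This mirrors the mapping set up in elf_kcore_store_hdr().
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long long vaddr;
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr;
	int fd, i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <kernel-virtual-address>\n", argv[0]);
		return EXIT_FAILURE;
	}
	vaddr = strtoull(argv[1], NULL, 0);

	fd = open("/proc/kcore", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/kcore");
		return EXIT_FAILURE;
	}
	if (pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr)) {
		perror("read ELF header");
		close(fd);
		return EXIT_FAILURE;
	}
	for (i = 0; i < ehdr.e_phnum; i++) {
		off_t off = ehdr.e_phoff + (off_t)i * ehdr.e_phentsize;

		if (pread(fd, &phdr, sizeof(phdr), off) != (ssize_t)sizeof(phdr))
			break;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (vaddr >= phdr.p_vaddr && vaddr - phdr.p_vaddr < phdr.p_memsz) {
			/* p_offset was derived from kc_vaddr_to_offset() + dataoff */
			printf("0x%llx -> file offset 0x%llx (segment %d)\n",
			       vaddr,
			       (unsigned long long)(phdr.p_offset + (vaddr - phdr.p_vaddr)),
			       i);
			close(fd);
			return EXIT_SUCCESS;
		}
	}
	fprintf(stderr, "no PT_LOAD segment covers 0x%llx\n", vaddr);
	close(fd);
	return EXIT_FAILURE;
}

This is essentially the lookup a debugger performs when it opens /proc/kcore as a core file alongside vmlinux.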