v3.1
 
/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
 * dump format as s390 standalone dumps.
 *
 * For more information please refer to Documentation/s390/zfcpdump.txt
 *
 * Copyright IBM Corp. 2003,2008
 * Author(s): Michael Holzheu
 */

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include "sclp.h"

#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

#define TO_USER		0
#define TO_KERNEL	1
#define CHUNK_INFO_SIZE	34 /* two 16-char hex numbers, each followed by a blank */

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info */

struct sys_info {
	enum arch_id	 arch;
	unsigned long	 sa_base;
	u32		 sa_size;
	int		 cpu_map[NR_CPUS];
	unsigned long	 mem_size;
	struct save_area lc_mask;
};

struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct ipl_parameter_block *ipl_block;

/*
 * Copy memory from HSA to kernel or user memory (not reentrant):
 *
 * @dest:  Kernel or user buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 * @mode:  Either TO_KERNEL or TO_USER
 */
static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (count == 0)
		return 0;

	/* copy first block */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		/* copy only the remaining bytes, not a whole page */
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}

static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}

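/*
 * Copy memory from real memory to user space, bouncing the data through
 * a static 4 KB buffer (not reentrant):
 *
 * @dest:  User buffer where memory should be copied to
 * @src:   Start address in real memory
 * @count: Size of buffer, which should be copied
 */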
static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
{
	static char buf[4096];
	int offs = 0, size;

	while (offs < count) {
		size = min(sizeof(buf), count - offs);
		if (memcpy_real(buf, (void *) src + offs, size))
			return -EFAULT;
		if (copy_to_user(dest + offs, buf, size))
			return -EFAULT;
		offs += size;
	}
	return 0;
}

static int __init init_cpu_info(enum arch_id arch)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */

	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		kfree(sa);
		return -EIO;
	}
	zfcpdump_save_areas[0] = sa;
	return 0;
}

static DEFINE_MUTEX(zcore_mutex);

#define DUMP_VERSION	0x5
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096

/* dump header, laid out according to the s390 crash dump format */

struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;
	struct cpuid cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	u8 mvdump;
	u16 cpu_cnt;
	u16 real_cpu_cnt;
	u8 end_pad1[0x200-0x061];
	u64 mvdump_sign;
	u64 mvdump_zipl_time;
	u8 end_pad2[0x800-0x210];
	u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));

static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef CONFIG_64BIT
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};
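
/*
 * Resulting layout of the "zcore/mem" file (see zcore_read() below):
 *
 *	offset 0x0000: struct zcore_header (HEADER_SIZE bytes)
 *	offset 0x1000: memory image, HSA part first, then absolute memory
 */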

/*
 * Copy lowcore info to buffer. Use map in order to copy only register parts.
 *
 * @buf:    User buffer
 * @sa:     Pointer to save area
 * @sa_off: Offset in save area to copy
 * @len:    Number of bytes to copy
 */
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
	int i;
	char *lc_mask = (char*)&sys_info.lc_mask;

	for (i = 0; i < len; i++) {
		if (!lc_mask[i + sa_off])
			continue;
		if (copy_to_user(buf + i, sa + sa_off + i, 1))
			return -EFAULT;
	}
	return 0;
}

/*
 * Copy lowcore info into the user buffer, if necessary
 *
 * @buf:   User buffer
 * @start: Start address of buffer in dump memory
 * @count: Size of buffer
 */
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
	unsigned long end;
	int i = 0;

	if (count == 0)
		return 0;

	end = start + count;
	while (zfcpdump_save_areas[i]) {
		unsigned long cp_start, cp_end; /* copy range */
		unsigned long sa_start, sa_end; /* save area range */
		unsigned long prefix;
		unsigned long sa_off, len, buf_off;

		prefix = zfcpdump_save_areas[i]->pref_reg;
		sa_start = prefix + sys_info.sa_base;
		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;

		if ((end < sa_start) || (start > sa_end))
			goto next;
		cp_start = max(start, sa_start);
		cp_end = min(end, sa_end);

		buf_off = cp_start - start;
		sa_off = cp_start - sa_start;
		len = cp_end - cp_start;

		TRACE("copy_lc for: %lx\n", start);
		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
			return -EFAULT;
next:
		i++;
	}
	return 0;
}

/*
 * Read routine for zcore character device
 * First 4K are dump header
 * Next 32MB are HSA memory
 * Rest is read from absolute memory
 */
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long mem_start; /* Start address in memory */
	size_t mem_offs;	 /* Offset in dump memory */
	size_t hdr_count;	 /* Size of header part of output buffer */
	size_t size;
	int rc;

	mutex_lock(&zcore_mutex);

	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
		rc = -EINVAL;
		goto fail;
	}

	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));

	/* Copy dump header (byte-wise offset into the header struct) */
	if (*ppos < HEADER_SIZE) {
		size = min(count, (size_t) (HEADER_SIZE - *ppos));
		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
			rc = -EFAULT;
			goto fail;
		}
		hdr_count = size;
		mem_start = 0;
	} else {
		hdr_count = 0;
		mem_start = *ppos - HEADER_SIZE;
	}

	mem_offs = 0;

	/* Copy from HSA data */
	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
			   - mem_start));
		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
		if (rc)
			goto fail;

		mem_offs += size;
	}

	/* Copy from real mem */
	size = count - mem_offs - hdr_count;
	rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
			      size);
	if (rc)
		goto fail;

	/*
	 * Since s390 dump analysis tools like lcrash or crash
	 * expect register sets in the prefix pages of the cpus,
	 * we copy them into the read buffer, if necessary.
	 * buf + hdr_count: Start of memory part of output buffer
	 * mem_start: Start memory address to copy from
	 * count - hdr_count: Size of memory area to copy
	 */
	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
		rc = -EFAULT;
		goto fail;
	}
	*ppos += count;
fail:
	mutex_unlock(&zcore_mutex);
	return (rc < 0) ? rc : count;
}

static int zcore_open(struct inode *inode, struct file *filp)
{
	if (!hsa_available)
		return -ENODATA;
	else
		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

static int zcore_release(struct inode *inode, struct file *filep)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
	return 0;
}

static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t rc;

	mutex_lock(&zcore_mutex);
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		rc = file->f_pos;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		rc = file->f_pos;
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&zcore_mutex);
	return rc;
}

static const struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};
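
/*
 * Minimal usage sketch (assuming debugfs is mounted at /sys/kernel/debug;
 * mount point and output path are illustrative, not mandated by zcore):
 *
 *	dd if=/sys/kernel/debug/zcore/mem of=/mnt/dump.s390 bs=4096
 */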

static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
}

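/*
 * On open, render the memory chunk table into a buffer that subsequent
 * reads return: one "<addr> <size> " pair of 16-digit hex numbers per chunk.
 */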
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
	int i;
	char *buf;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
	if (!buf) {
		kfree(chunk_array);
		return -ENOMEM;
	}
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
			(unsigned long long) chunk_array[i].addr,
			(unsigned long long) chunk_array[i].size);
		if (chunk_array[i].size == 0)
			break;
	}
	kfree(chunk_array);
	filp->private_data = buf;
	return nonseekable_open(inode, filp);
}

static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations zcore_memmap_fops = {
	.owner		= THIS_MODULE,
	.read		= zcore_memmap_read,
	.open		= zcore_memmap_open,
	.release	= zcore_memmap_release,
	.llseek		= no_llseek,
};

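/*
 * Writing anything to the "reipl" file boots the previously running
 * system from the IPL parameter block saved at dump time (if one was
 * found by zcore_reipl_init()).
 */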
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (ipl_block) {
		diag308(DIAG308_SET, ipl_block);
		diag308(DIAG308_IPL, NULL);
	}
	return count;
}

static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return nonseekable_open(inode, filp);
}

static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
	.llseek		= no_llseek,
};

#ifdef CONFIG_32BIT

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->ext_save, 0xff, sizeof(map->ext_save));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#else /* CONFIG_32BIT */

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#endif /* CONFIG_32BIT */

/*
 * Initialize dump globals for a given architecture
 */
static int __init sys_info_init(enum arch_id arch)
{
	int rc;

	switch (arch) {
	case ARCH_S390X:
		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
		break;
	case ARCH_S390:
		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
		break;
	default:
		pr_alert("0x%x is an unknown architecture.\n", arch);
		return -EINVAL;
	}
	sys_info.sa_base = SAVE_AREA_BASE;
	sys_info.sa_size = sizeof(struct save_area);
	sys_info.arch = arch;
	set_lc_mask(&sys_info.lc_mask);
	rc = init_cpu_info(arch);
	if (rc)
		return rc;
	sys_info.mem_size = real_memory_size;

	return 0;
}

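/*
 * Check that the HSA reported by the SCLP SDIAS facility is at least as
 * large as the dump tool expects (ZFCPDUMP_HSA_SIZE).
 */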
static int __init check_sdias(void)
{
	int rc, act_hsa_size;

	rc = sclp_sdias_blk_count();
	if (rc < 0) {
		TRACE("Could not determine HSA size\n");
		return rc;
	}
	act_hsa_size = (rc - 1) * PAGE_SIZE;
	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
		TRACE("HSA size too small: %i\n", act_hsa_size);
		return -EINVAL;
	}
	return 0;
}

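/*
 * Sum the sizes of all detected memory chunks to obtain the total memory
 * size of the dumped system.
 */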
static int __init get_mem_size(unsigned long *mem)
{
	int i;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk_array[i].size == 0)
			break;
		*mem += chunk_array[i].size;
	}
	kfree(chunk_array);
	return 0;
}

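/*
 * Fill the dump header fields that are only known at runtime: memory
 * size, TOD clock, CPU id and the prefix registers of all saved CPUs.
 */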
static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
	int rc, i;
	unsigned long memory = 0;
	u32 prefix;

	if (arch == ARCH_S390X)
		hdr->arch_id = DUMP_ARCH_S390X;
	else
		hdr->arch_id = DUMP_ARCH_S390;
	rc = get_mem_size(&memory);
	if (rc)
		return rc;
	hdr->mem_size = memory;
	hdr->rmem_size = memory;
	hdr->mem_end = sys_info.mem_size;
	hdr->num_pages = memory / PAGE_SIZE;
	hdr->tod = get_clock();
	get_cpu_id(&hdr->cpu_id);
	for (i = 0; zfcpdump_save_areas[i]; i++) {
		prefix = zfcpdump_save_areas[i]->pref_reg;
		hdr->real_cpu_cnt++;
		if (!prefix)
			continue;
		hdr->lc_vec[hdr->cpu_cnt] = prefix;
		hdr->cpu_cnt++;
	}
	return 0;
}

/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
static int __init zcore_reipl_init(void)
{
	struct ipib_info ipib_info;
	int rc;

	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;
	ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!ipl_block)
		return -ENOMEM;
	if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
	else
		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
	if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) ipl_block);
		ipl_block = NULL;
	}
	return 0;
}
static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

#ifdef CONFIG_64BIT
	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#else /* CONFIG_64BIT */
	if (arch == ARCH_S390X) {
		pr_alert("The 32-bit dump tool cannot be used for a "
			 "64-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#endif /* CONFIG_64BIT */

	rc = sys_info_init(arch);
	if (rc)
		goto fail;

	rc = zcore_header_init(arch, &zcore_header);
	if (rc)
		goto fail;

	rc = zcore_reipl_init();
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	if (!zcore_dir) {
		rc = -ENOMEM;
		goto fail;
	}
	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
					 &zcore_fops);
	if (!zcore_file) {
		rc = -ENOMEM;
		goto fail_dir;
	}
	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
						NULL, &zcore_memmap_fops);
	if (!zcore_memmap_file) {
		rc = -ENOMEM;
		goto fail_file;
	}
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
						NULL, &zcore_reipl_fops);
	if (!zcore_reipl_file) {
		rc = -ENOMEM;
		goto fail_memmap_file;
	}
	hsa_available = 1;
	return 0;

fail_memmap_file:
	debugfs_remove(zcore_memmap_file);
fail_file:
	debugfs_remove(zcore_file);
fail_dir:
	debugfs_remove(zcore_dir);
fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	free_page((unsigned long) ipl_block);
	debugfs_remove(zcore_reipl_file);
	debugfs_remove(zcore_memmap_file);
	debugfs_remove(zcore_file);
	debugfs_remove(zcore_dir);
	diag308(DIAG308_REL_HSA, NULL);
}

MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");

subsys_initcall(zcore_init);
module_exit(zcore_exit);

v6.9.4
// SPDX-License-Identifier: GPL-1.0+
/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI/NVMe disks (zfcp/nvme dump).
 *
 * For more information please refer to Documentation/arch/s390/zfcpdump.rst
 *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
 */

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/uio.h>

#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/maccess.h>
#include "sclp.h"

#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
static unsigned long os_info_flags;

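/* bounce buffer for SDIAS copies from the HSA, serialized by hsa_buf_mutex */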
static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Copy memory from HSA to iterator (not reentrant):
 *
 * @iter:  Iterator where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 */
size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t bytes, copied, res = 0;
	unsigned long offset;

	if (!hsa_available)
		return 0;

	mutex_lock(&hsa_buf_mutex);
	while (count) {
		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			break;
		}
		offset = src % PAGE_SIZE;
		bytes = min(PAGE_SIZE - offset, count);
		copied = copy_to_iter(hsa_buf + offset, bytes, iter);
		count -= copied;
		src += copied;
		res += copied;
		if (copied < bytes)
			break;
	}
	mutex_unlock(&hsa_buf_mutex);
	return res;
}

/*
 * Copy memory from HSA to kernel memory (not reentrant):
 *
 * @dest:  Kernel buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 */
static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dst;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_hsa_iter(&iter, src, count) < count)
		return -EIO;
	return 0;
}

static int __init init_cpu_info(void)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */
	sa = save_area_boot_cpu();
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
		TRACE("could not copy from HSA\n");
		return -EIO;
	}
	save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
	return 0;
}

/*
 * Release the HSA
 */
static void release_hsa(void)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
}
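/*
 * Writing to the "reipl" file boots the previously running system: the
 * saved IPL parameter block is set via diag308, using the subcode that
 * matches the IPL type and the re-IPL-clear flag recorded in os_info.
 */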
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (zcore_ipl_block) {
		diag308(DIAG308_SET, zcore_ipl_block);
		if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
			diag308(DIAG308_LOAD_CLEAR, NULL);
		/* Use special diag308 subcode for CCW normal ipl */
		if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
		else
			diag308(DIAG308_LOAD_NORMAL, NULL);
	}
	return count;
}

static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return stream_open(inode, filp);
}

static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
	.llseek		= no_llseek,
};

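/*
 * The "hsa" file reports the HSA size as a hex string ("0" once the HSA
 * has been released); writing '0' to it releases the HSA early.
 */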
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *ppos)
{
	static char str[18];

	if (hsa_available)
		snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
	else
		snprintf(str, sizeof(str), "0\n");
	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
}

static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	char value;

	if (*ppos != 0)
		return -EPIPE;
	if (copy_from_user(&value, buf, 1))
		return -EFAULT;
	if (value != '0')
		return -EINVAL;
	release_hsa();
	return count;
}

static const struct file_operations zcore_hsa_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_hsa_write,
	.read		= zcore_hsa_read,
	.open		= nonseekable_open,
	.llseek		= no_llseek,
};

static int __init check_sdias(void)
{
	if (!sclp.hsa_size) {
		TRACE("Could not determine HSA size\n");
		return -ENODEV;
	}
	return 0;
}

/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
static int __init zcore_reipl_init(void)
{
	struct os_info_entry *entry;
	struct ipib_info ipib_info;
	unsigned long os_info_addr;
	struct os_info *os_info;
	int rc;

	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;
	zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!zcore_ipl_block)
		return -ENOMEM;
	if (ipib_info.ipib < sclp.hsa_size)
		rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
				       PAGE_SIZE);
	else
		rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE);
	if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) zcore_ipl_block);
		zcore_ipl_block = NULL;
	}
	/*
	 * Read the bit-flags field from os_info flags entry.
	 * Return zero even for os_info read or entry checksum errors in order
	 * to continue dump processing, considering that os_info could be
	 * corrupted on the panicked system.
	 */
	os_info = (void *)__get_free_page(GFP_KERNEL);
	if (!os_info)
		return -ENOMEM;
	rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
	if (rc)
		goto out;
	if (os_info_addr < sclp.hsa_size)
		rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
	else
		rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
	if (rc || os_info_csum(os_info) != os_info->csum)
		goto out;
	entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
	if (entry->addr && entry->size) {
		if (entry->addr < sclp.hsa_size)
			rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
		else
			rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
		if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
			os_info_flags = 0;
	}
out:
	free_page((unsigned long)os_info);
	return 0;
}

static int zcore_reboot_and_on_panic_handler(struct notifier_block *self,
					     unsigned long	   event,
					     void		   *data)
{
	if (hsa_available)
		release_hsa();

	return NOTIFY_OK;
}

static struct notifier_block zcore_reboot_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};

static struct notifier_block zcore_on_panic_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};

static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (!is_ipl_type_dump())
		return -ENODATA;
	if (oldmem_data.start)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
		TRACE("type:   fcp\n");
		TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
		TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
		TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
	} else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
		TRACE("type:   nvme\n");
		TRACE("fid:    %x\n", ipl_info.data.nvme.fid);
		TRACE("nsid:   %x\n", ipl_info.data.nvme.nsid);
	} else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) {
		TRACE("type:   eckd\n");
		TRACE("devno:  %x\n", ipl_info.data.eckd.dev_id.devno);
		TRACE("ssid:   %x\n", ipl_info.data.eckd.dev_id.ssid);
	}

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;
	hsa_available = 1;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}

	pr_alert("The dump process started for a 64-bit operating system\n");
	rc = init_cpu_info();
	if (rc)
		goto fail;

	rc = zcore_reipl_init();
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
						NULL, &zcore_reipl_fops);
	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
					     NULL, &zcore_hsa_fops);

	register_reboot_notifier(&zcore_reboot_notifier);
	atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier);

	return 0;
fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

subsys_initcall(zcore_init);