Linux Audio

Check our new training course

Loading...
  1/*
  2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 *
 14 * based on machine_kexec.c from other architectures in linux-2.6.18
 15 */
 16
 17#include <linux/mm.h>
 18#include <linux/kexec.h>
 19#include <linux/delay.h>
 20#include <linux/reboot.h>
 21#include <linux/errno.h>
 22#include <linux/vmalloc.h>
 23#include <linux/cpumask.h>
 24#include <linux/kernel.h>
 25#include <linux/elf.h>
 26#include <linux/highmem.h>
 27#include <linux/mmu_context.h>
 28#include <linux/io.h>
 29#include <linux/timex.h>
 30#include <asm/pgtable.h>
 31#include <asm/pgalloc.h>
 32#include <asm/cacheflush.h>
 33#include <asm/checksum.h>
 34#include <asm/tlbflush.h>
 35#include <asm/homecache.h>
 36#include <hv/hypervisor.h>
 37
 38
 39/*
 40 * This stuff is not in elf.h and is not in any other kernel include.
 41 * This stuff is needed below in the little boot notes parser to
 42 * extract the command line so we can pass it to the hypervisor.
 43 */
 44struct Elf32_Bhdr {
 45	Elf32_Word b_signature;
 46	Elf32_Word b_size;
 47	Elf32_Half b_checksum;
 48	Elf32_Half b_records;
 49};
 50#define ELF_BOOT_MAGIC		0x0E1FB007
 51#define EBN_COMMAND_LINE	0x00000004
 52#define roundupsz(X) (((X) + 3) & ~3)
 53
 54/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 55
 56
 57void machine_shutdown(void)
 58{
 59	/*
 60	 * Normally we would stop all the other processors here, but
 61	 * the check in machine_kexec_prepare below ensures we'll only
 62	 * get this far if we've been booted with "nosmp" on the
 63	 * command line or without CONFIG_SMP so there's nothing to do
 64	 * here (for now).
 65	 */
 66}
 67
 68void machine_crash_shutdown(struct pt_regs *regs)
 69{
 70	/*
 71	 * Cannot happen.  This type of kexec is disabled on this
 72	 * architecture (and enforced in machine_kexec_prepare below).
 73	 */
 74}
 75
 76
 77int machine_kexec_prepare(struct kimage *image)
 78{
 79	if (num_online_cpus() > 1) {
 80		pr_warning("%s: detected attempt to kexec "
 81		       "with num_online_cpus() > 1\n",
 82		       __func__);
 83		return -ENOSYS;
 84	}
 85	if (image->type != KEXEC_TYPE_DEFAULT) {
 86		pr_warning("%s: detected attempt to kexec "
 87		       "with unsupported type: %d\n",
 88		       __func__,
 89		       image->type);
 90		return -ENOSYS;
 91	}
 92	return 0;
 93}
 94
 95void machine_kexec_cleanup(struct kimage *image)
 96{
 97	/*
 98	 * We did nothing in machine_kexec_prepare,
 99	 * so we have nothing to do here.
100	 */
101}
102
103/*
104 * If we can find elf boot notes on this page, return the command
105 * line.  Otherwise, silently return null.  Somewhat kludgy, but no
106 * good way to do this without significantly rearchitecting the
107 * architecture-independent kexec code.
108 */
109
110static unsigned char *kexec_bn2cl(void *pg)
111{
112	struct Elf32_Bhdr *bhdrp;
113	Elf32_Nhdr *nhdrp;
114	unsigned char *desc;
115	unsigned char *command_line;
116	__sum16 csum;
117
118	bhdrp = (struct Elf32_Bhdr *) pg;
119
120	/*
121	 * This routine is invoked for every source page, so make
122	 * sure to quietly ignore every impossible page.
123	 */
124	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
125	    bhdrp->b_size > PAGE_SIZE)
126		return 0;
127
128	/*
129	 * If we get a checksum mismatch, warn with the checksum
130	 * so we can diagnose better.
131	 */
132	csum = ip_compute_csum(pg, bhdrp->b_size);
133	if (csum != 0) {
134		pr_warning("%s: bad checksum %#x (size %d)\n",
135			   __func__, csum, bhdrp->b_size);
136		return 0;
137	}
138
139	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);
140
141	while (nhdrp->n_type != EBN_COMMAND_LINE) {
142
143		desc = (unsigned char *) (nhdrp + 1);
144		desc += roundupsz(nhdrp->n_descsz);
145
146		nhdrp = (Elf32_Nhdr *) desc;
147
148		/* still in bounds? */
149		if ((unsigned char *) (nhdrp + 1) >
150		    ((unsigned char *) pg) + bhdrp->b_size) {
151
152			pr_info("%s: out of bounds\n", __func__);
153			return 0;
154		}
155	}
156
157	command_line = (unsigned char *) (nhdrp + 1);
158	desc = command_line;
159
160	while (*desc != '\0') {
161		desc++;
162		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
163			pr_info("%s: ran off end of page\n",
164			       __func__);
165			return 0;
166		}
167	}
168
169	return command_line;
170}
171
172static void kexec_find_and_set_command_line(struct kimage *image)
173{
174	kimage_entry_t *ptr, entry;
175
176	unsigned char *command_line = 0;
177	unsigned char *r;
178	HV_Errno hverr;
179
180	for (ptr = &image->head;
181	     (entry = *ptr) && !(entry & IND_DONE);
182	     ptr = (entry & IND_INDIRECTION) ?
183		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {
184
185		if ((entry & IND_SOURCE)) {
186			void *va =
187				kmap_atomic_pfn(entry >> PAGE_SHIFT);
188			r = kexec_bn2cl(va);
189			if (r) {
190				command_line = r;
191				break;
192			}
193			kunmap_atomic(va);
194		}
195	}
196
197	if (command_line != 0) {
198		pr_info("setting new command line to \"%s\"\n",
199		       command_line);
200
201		hverr = hv_set_command_line(
202			(HV_VirtAddr) command_line, strlen(command_line));
203		kunmap_atomic(command_line);
204	} else {
205		pr_info("%s: no command line found; making empty\n",
206		       __func__);
207		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
208	}
209	if (hverr)
210		pr_warning("%s: hv_set_command_line returned error: %d\n",
211			   __func__, hverr);
212}
213
214/*
215 * The kexec code range-checks all its PAs, so to avoid having it run
216 * amok and allocate memory and then sequester it from every other
217 * controller, we force it to come from controller zero.  We also
218 * disable the oom-killer since if we do end up running out of memory,
219 * that almost certainly won't help.
220 */
221struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
222{
223	gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
224	return alloc_pages_node(0, gfp_mask, order);
225}
226
227/*
228 * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
229 * For tilepro, PAGE_OFFSET is used since this is the largest possbile value
230 * for tilepro, while for tilegx, we limit it to entire middle level page
231 * table which we assume has been allocated and is undoubtedly large enough.
232 */
233#ifndef __tilegx__
234#define	QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
235#else
236#define	QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
237#endif
238
/*
 * Install huge-page va==pa identity mappings up to
 * QUASI_VA_IS_PA_ADDR_RANGE.  Called from machine_kexec() just
 * before jumping into the relocation code, so that code can run
 * with virtual addresses equal to physical addresses.
 */
static void setup_quasi_va_is_pa(void)
{
	HV_PTE pte;
	unsigned long i;

	/*
	 * Flush our TLB to prevent conflicts between the previous contents
	 * and the new stuff we're about to add.
	 */
	local_flush_tlb_all();

	/*
	 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
	 * Note here we assume that level-1 page table is defined by
	 * HPAGE_SIZE.
	 */
	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
	for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
		/* One huge page per iteration: va == pa by construction. */
		unsigned long vaddr = i << HPAGE_SHIFT;
		pgd_t *pgd = pgd_offset(current->mm, vaddr);
		pud_t *pud = pud_offset(pgd, vaddr);
		pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);

		/* Only map pfns that actually exist; leave holes unmapped. */
		if (pfn_valid(pfn))
			__set_pte(ptep, pfn_pte(pfn, pte));
	}
}
268
269
/*
 * Perform the kexec: pass the command line to the hypervisor, copy
 * the relocation trampoline into the control page, set up a va==pa
 * mapping, and jump to the trampoline.  Does not return.
 */
void machine_kexec(struct kimage *image)
{
	void *reboot_code_buffer;
	pte_t *ptep;
	void (*rnk)(unsigned long, void *, unsigned long)
		__noreturn;

	/* Mask all interrupts before starting to reboot. */
	interrupt_mask_set_mask(~0ULL);

	/* Hand the new kernel's command line (if any) to the hypervisor. */
	kexec_find_and_set_command_line(image);

	/*
	 * Adjust the home caching of the control page to be cached on
	 * this cpu, and copy the assembly helper into the control
	 * code page, which we map in the vmalloc area.
	 */
	homecache_change_page_home(image->control_code_page, 0,
				   smp_processor_id());
	reboot_code_buffer = page_address(image->control_code_page);
	BUG_ON(reboot_code_buffer == NULL);
	/* Make the control page executable before we jump into it. */
	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
	__set_pte(ptep, pte_mkexec(*ptep));
	memcpy(reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
	/* Keep the icache coherent with the code we just copied in. */
	__flush_icache_range(
		(unsigned long) reboot_code_buffer,
		(unsigned long) reboot_code_buffer + relocate_new_kernel_size);

	setup_quasi_va_is_pa();

	/* now call it */
	rnk = reboot_code_buffer;
	(*rnk)(image->head, reboot_code_buffer, image->start);
}
304}