v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (C) 2005-2017 Andes Technology Corporation
  3
  4#include <linux/extable.h>
  5#include <linux/module.h>
  6#include <linux/signal.h>
  7#include <linux/ptrace.h>
  8#include <linux/mm.h>
  9#include <linux/init.h>
 10#include <linux/hardirq.h>
 11#include <linux/uaccess.h>
 12#include <linux/perf_event.h>
 13
 14#include <asm/pgtable.h>
 15#include <asm/tlbflush.h>
 16
 17extern void die(const char *str, struct pt_regs *regs, long err);
 18
 19/*
 20 * This is useful to dump out the page tables associated with
 21 * 'addr' in mm 'mm'.
 22 */
 23void show_pte(struct mm_struct *mm, unsigned long addr)
 24{
 25	pgd_t *pgd;
 26	if (!mm)
 27		mm = &init_mm;
 28
 29	pr_alert("pgd = %p\n", mm->pgd);
 30	pgd = pgd_offset(mm, addr);
 31	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
 32
 33	do {
 34		pmd_t *pmd;
 35
 36		if (pgd_none(*pgd))
 37			break;
 38
 39		if (pgd_bad(*pgd)) {
 40			pr_alert("(bad)");
 41			break;
 42		}
 43
 44		pmd = pmd_offset(pgd, addr);
 45#if PTRS_PER_PMD != 1
 46		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
 47#endif
 48
 49		if (pmd_none(*pmd))
 50			break;
 51
 52		if (pmd_bad(*pmd)) {
 53			pr_alert("(bad)");
 54			break;
 55		}
 56
 57		if (IS_ENABLED(CONFIG_HIGHMEM))
 58		{
 59			pte_t *pte;
 60			/* We must not map this if we have highmem enabled */
 61			pte = pte_offset_map(pmd, addr);
 62			pr_alert(", *pte=%08lx", pte_val(*pte));
 63			pte_unmap(pte);
 64		}
 65	} while (0);
 66
 67	pr_alert("\n");
 68}
 69
 70void do_page_fault(unsigned long entry, unsigned long addr,
 71		   unsigned int error_code, struct pt_regs *regs)
 72{
 73	struct task_struct *tsk;
 74	struct mm_struct *mm;
 75	struct vm_area_struct *vma;
 76	int si_code;
 77	vm_fault_t fault;
 78	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
 79	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 80
 81	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
 82	tsk = current;
 83	mm = tsk->mm;
 84	si_code = SEGV_MAPERR;
 85	/*
 86	 * We fault-in kernel-space virtual memory on-demand. The
 87	 * 'reference' page table is init_mm.pgd.
 88	 *
 89	 * NOTE! We MUST NOT take any locks for this case. We may
 90	 * be in an interrupt or a critical region, and should
 91	 * only copy the information from the master page table,
 92	 * nothing more.
 93	 */
 94	if (addr >= TASK_SIZE) {
 95		if (user_mode(regs))
 96			goto bad_area_nosemaphore;
 97
 98		if (addr >= TASK_SIZE && addr < VMALLOC_END
 99		    && (entry == ENTRY_PTE_NOT_PRESENT))
100			goto vmalloc_fault;
101		else
102			goto no_context;
103	}
104
105	/* Send a signal to the task for handling the unaligned access. */
106	if (entry == ENTRY_GENERAL_EXCPETION
107	    && error_code == ETYPE_ALIGNMENT_CHECK) {
108		if (user_mode(regs))
109			goto bad_area_nosemaphore;
110		else
111			goto no_context;
112	}
113
114	/*
115	 * If we're in an interrupt or have no user
116	 * context, we must not take the fault..
117	 */
118	if (unlikely(faulthandler_disabled() || !mm))
119		goto no_context;
120
121	/*
122	 * As per x86, we may deadlock here. However, since the kernel only
123	 * validly references user space from well defined areas of the code,
124	 * we can bug out early if this is from code which shouldn't.
125	 */
126	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
127		if (!user_mode(regs) &&
128		    !search_exception_tables(instruction_pointer(regs)))
129			goto no_context;
130retry:
131		down_read(&mm->mmap_sem);
132	} else {
133		/*
134		 * The above down_read_trylock() might have succeeded in which
135		 * case, we'll have missed the might_sleep() from down_read().
136		 */
137		might_sleep();
138		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
139			if (!user_mode(regs) &&
140			    !search_exception_tables(instruction_pointer(regs)))
141				goto no_context;
142		}
143	}
144
145	vma = find_vma(mm, addr);
146
147	if (unlikely(!vma))
148		goto bad_area;
149
150	if (vma->vm_start <= addr)
151		goto good_area;
152
153	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
154		goto bad_area;
155
156	if (unlikely(expand_stack(vma, addr)))
157		goto bad_area;
158
159	/*
160	 * Ok, we have a good vm_area for this memory access, so
161	 * we can handle it..
162	 */
163
164good_area:
165	si_code = SEGV_ACCERR;
166
167	/* first do some preliminary protection checks */
168	if (entry == ENTRY_PTE_NOT_PRESENT) {
169		if (error_code & ITYPE_mskINST)
170			mask = VM_EXEC;
171		else {
172			mask = VM_READ | VM_WRITE;
173		}
174	} else if (entry == ENTRY_TLB_MISC) {
175		switch (error_code & ITYPE_mskETYPE) {
176		case RD_PROT:
177			mask = VM_READ;
178			break;
179		case WRT_PROT:
180			mask = VM_WRITE;
181			flags |= FAULT_FLAG_WRITE;
182			break;
183		case NOEXEC:
184			mask = VM_EXEC;
185			break;
186		case PAGE_MODIFY:
187			mask = VM_WRITE;
188			flags |= FAULT_FLAG_WRITE;
189			break;
190		case ACC_BIT:
191			BUG();
192		default:
193			break;
194		}
195
196	}
197	if (!(vma->vm_flags & mask))
198		goto bad_area;
199
200	/*
201	 * If for any reason at all we couldn't handle the fault,
202	 * make sure we exit gracefully rather than endlessly redo
203	 * the fault.
204	 */
205
206	fault = handle_mm_fault(vma, addr, flags);
207
208	/*
209	 * If we need to retry but a fatal signal is pending, handle the
210	 * signal first. We do not need to release the mmap_sem because it
211	 * would already be released in __lock_page_or_retry in mm/filemap.c.
212	 */
213	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
214		if (!user_mode(regs))
215			goto no_context;
216		return;
217	}
218
219	if (unlikely(fault & VM_FAULT_ERROR)) {
220		if (fault & VM_FAULT_OOM)
221			goto out_of_memory;
222		else if (fault & VM_FAULT_SIGBUS)
223			goto do_sigbus;
224		else
225			goto bad_area;
226	}
227
228	/*
229	 * Major/minor page fault accounting is only done on the initial
230	 * attempt. If we go through a retry, it is extremely likely that the
231	 * page will be found in page cache at that point.
232	 */
233	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
234	if (flags & FAULT_FLAG_ALLOW_RETRY) {
235		if (fault & VM_FAULT_MAJOR) {
236			tsk->maj_flt++;
237			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
238				      1, regs, addr);
239		} else {
240			tsk->min_flt++;
241			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
242				      1, regs, addr);
243		}
244		if (fault & VM_FAULT_RETRY) {
245			flags &= ~FAULT_FLAG_ALLOW_RETRY;
246			flags |= FAULT_FLAG_TRIED;
247
248			/* No need to up_read(&mm->mmap_sem) as we would
249			 * have already released it in __lock_page_or_retry
250			 * in mm/filemap.c.
251			 */
252			goto retry;
253		}
254	}
255
256	up_read(&mm->mmap_sem);
257	return;
258
259	/*
260	 * Something tried to access memory that isn't in our memory map..
261	 * Fix it, but check if it's kernel or user first..
262	 */
263bad_area:
264	up_read(&mm->mmap_sem);
265
266bad_area_nosemaphore:
267
268	/* User mode accesses just cause a SIGSEGV */
269
270	if (user_mode(regs)) {
271		tsk->thread.address = addr;
272		tsk->thread.error_code = error_code;
273		tsk->thread.trap_no = entry;
274		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
275		return;
276	}
277
278no_context:
279
280	/* Are we prepared to handle this kernel fault?
281	 *
282	 * (The kernel has valid exception-points in the source
283	 *  when it accesses user-memory. When it fails in one
284	 *  of those points, we find it in a table and do a jump
285	 *  to some fixup code that loads an appropriate error
286	 *  code)
287	 */
288
289	{
290		const struct exception_table_entry *entry;
291
292		if ((entry =
293		     search_exception_tables(instruction_pointer(regs))) !=
294		    NULL) {
295			/* Adjust the instruction pointer in the stackframe */
296			instruction_pointer(regs) = entry->fixup;
297			return;
298		}
299	}
300
301	/*
302	 * Oops. The kernel tried to access some bad page. We'll have to
303	 * terminate things with extreme prejudice.
304	 */
305
306	bust_spinlocks(1);
307	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
308		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
309		 "paging request", addr);
310
311	show_pte(mm, addr);
312	die("Oops", regs, error_code);
313	bust_spinlocks(0);
314	do_exit(SIGKILL);
315
316	return;
317
318	/*
319	 * We ran out of memory, or some other thing happened to us that made
320	 * us unable to handle the page fault gracefully.
321	 */
322
323out_of_memory:
324	up_read(&mm->mmap_sem);
325	if (!user_mode(regs))
326		goto no_context;
327	pagefault_out_of_memory();
328	return;
329
330do_sigbus:
331	up_read(&mm->mmap_sem);
332
333	/* Kernel mode? Handle exceptions or die */
334	if (!user_mode(regs))
335		goto no_context;
336
337	/*
338	 * Send a sigbus
339	 */
340	tsk->thread.address = addr;
341	tsk->thread.error_code = error_code;
342	tsk->thread.trap_no = entry;
343	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);
344
345	return;
346
347vmalloc_fault:
348	{
349		/*
350		 * Synchronize this task's top level page-table
351		 * with the 'reference' page table.
352		 *
353		 * Use current_pgd instead of tsk->active_mm->pgd
354		 * since the latter might be unavailable if this
355		 * code is executed in a misfortunately run irq
356		 * (like inside schedule() between switch_mm and
357		 *  switch_to...).
358		 */
359
360		unsigned int index = pgd_index(addr);
361		pgd_t *pgd, *pgd_k;
362		pud_t *pud, *pud_k;
363		pmd_t *pmd, *pmd_k;
364		pte_t *pte_k;
365
366		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
367		pgd_k = init_mm.pgd + index;
368
369		if (!pgd_present(*pgd_k))
370			goto no_context;
371
372		pud = pud_offset(pgd, addr);
373		pud_k = pud_offset(pgd_k, addr);
374		if (!pud_present(*pud_k))
375			goto no_context;
376
377		pmd = pmd_offset(pud, addr);
378		pmd_k = pmd_offset(pud_k, addr);
379		if (!pmd_present(*pmd_k))
380			goto no_context;
381
382		if (!pmd_present(*pmd))
383			set_pmd(pmd, *pmd_k);
384		else
385			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
386
387		/*
388		 * Since the vmalloc area is global, we don't
389		 * need to copy individual PTE's, it is enough to
390		 * copy the pgd pointer into the pte page of the
391		 * root task. If that is there, we'll find our pte if
392		 * it exists.
393		 */
394
395		/* Make sure the actual PTE exists as well to
396		 * catch kernel vmalloc-area accesses to non-mapped
397		 * addresses. If we don't do this, this will just
398		 * silently loop forever.
399		 */
400
401		pte_k = pte_offset_kernel(pmd_k, addr);
402		if (!pte_present(*pte_k))
403			goto no_context;
404
405		return;
406	}
407}
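
From user space, the bad_area paths in the handler above show up as SIGSEGV with a si_code that tells the two failure modes apart: SEGV_MAPERR is set before the VMA lookup and is what gets delivered when no mapping covers the faulting address, while SEGV_ACCERR is set at good_area and is delivered when a mapping exists but its vm_flags lack the permission bits in mask. The following is a minimal, hedged user-space sketch of that distinction using plain POSIX calls only; nothing in it is taken from the nds32 code, and the handler name, probe address and mapping size are arbitrary demo choices.

#define _GNU_SOURCE
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	if (info->si_code == SEGV_MAPERR)	/* bad_area / bad_area_nosemaphore */
		write(STDOUT_FILENO, "SEGV_MAPERR\n", 12);
	else if (info->si_code == SEGV_ACCERR)	/* good_area, vm_flags mismatch */
		write(STDOUT_FILENO, "SEGV_ACCERR\n", 12);
	_exit(0);
}

int main(int argc, char **argv)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	if (argc > 1) {
		/* valid VMA, wrong permission: write to a read-only mapping */
		char *p = mmap(NULL, 4096, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		p[0] = 1;			/* expect SEGV_ACCERR */
	} else {
		/* no mapping covers this address at all */
		*(volatile char *)8 = 1;	/* expect SEGV_MAPERR */
	}
	return 0;
}

Run with no arguments it should report SEGV_MAPERR; run with any argument it should report SEGV_ACCERR.
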
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (C) 2005-2017 Andes Technology Corporation
  3
  4#include <linux/extable.h>
  5#include <linux/module.h>
  6#include <linux/signal.h>
  7#include <linux/ptrace.h>
  8#include <linux/mm.h>
  9#include <linux/init.h>
 10#include <linux/hardirq.h>
 11#include <linux/uaccess.h>
 12
 13#include <asm/pgtable.h>
 14#include <asm/tlbflush.h>
 15
 16extern void die(const char *str, struct pt_regs *regs, long err);
 17
 18/*
 19 * This is useful to dump out the page tables associated with
 20 * 'addr' in mm 'mm'.
 21 */
 22void show_pte(struct mm_struct *mm, unsigned long addr)
 23{
 24	pgd_t *pgd;
 25	if (!mm)
 26		mm = &init_mm;
 27
 28	pr_alert("pgd = %p\n", mm->pgd);
 29	pgd = pgd_offset(mm, addr);
 30	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
 31
 32	do {
 33		pmd_t *pmd;
 34
 35		if (pgd_none(*pgd))
 36			break;
 37
 38		if (pgd_bad(*pgd)) {
 39			pr_alert("(bad)");
 40			break;
 41		}
 42
 43		pmd = pmd_offset(pgd, addr);
 44#if PTRS_PER_PMD != 1
 45		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
 46#endif
 47
 48		if (pmd_none(*pmd))
 49			break;
 50
 51		if (pmd_bad(*pmd)) {
 52			pr_alert("(bad)");
 53			break;
 54		}
 55
 56		if (IS_ENABLED(CONFIG_HIGHMEM))
 57		{
 58			pte_t *pte;
 59			/* We must not map this if we have highmem enabled */
 60			pte = pte_offset_map(pmd, addr);
 61			pr_alert(", *pte=%08lx", pte_val(*pte));
 62			pte_unmap(pte);
 63		}
 64	} while (0);
 65
 66	pr_alert("\n");
 67}
 68
 69void do_page_fault(unsigned long entry, unsigned long addr,
 70		   unsigned int error_code, struct pt_regs *regs)
 71{
 72	struct task_struct *tsk;
 73	struct mm_struct *mm;
 74	struct vm_area_struct *vma;
 75	siginfo_t info;
 76	int fault;
 77	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
 78	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 79
 80	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
 81	tsk = current;
 82	mm = tsk->mm;
 83	info.si_code = SEGV_MAPERR;
 84	/*
 85	 * We fault-in kernel-space virtual memory on-demand. The
 86	 * 'reference' page table is init_mm.pgd.
 87	 *
 88	 * NOTE! We MUST NOT take any locks for this case. We may
 89	 * be in an interrupt or a critical region, and should
 90	 * only copy the information from the master page table,
 91	 * nothing more.
 92	 */
 93	if (addr >= TASK_SIZE) {
 94		if (user_mode(regs))
 95			goto bad_area_nosemaphore;
 96
 97		if (addr >= TASK_SIZE && addr < VMALLOC_END
 98		    && (entry == ENTRY_PTE_NOT_PRESENT))
 99			goto vmalloc_fault;
100		else
101			goto no_context;
102	}
103
104	/* Send a signal to the task for handling the unaligned access. */
105	if (entry == ENTRY_GENERAL_EXCPETION
106	    && error_code == ETYPE_ALIGNMENT_CHECK) {
107		if (user_mode(regs))
108			goto bad_area_nosemaphore;
109		else
110			goto no_context;
111	}
112
113	/*
114	 * If we're in an interrupt or have no user
115	 * context, we must not take the fault..
116	 */
117	if (unlikely(faulthandler_disabled() || !mm))
118		goto no_context;
119
120	/*
121	 * As per x86, we may deadlock here. However, since the kernel only
122	 * validly references user space from well defined areas of the code,
123	 * we can bug out early if this is from code which shouldn't.
124	 */
125	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
126		if (!user_mode(regs) &&
127		    !search_exception_tables(instruction_pointer(regs)))
128			goto no_context;
129retry:
130		down_read(&mm->mmap_sem);
131	} else {
132		/*
133		 * The above down_read_trylock() might have succeeded in which
134		 * case, we'll have missed the might_sleep() from down_read().
135		 */
136		might_sleep();
137		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
138			if (!user_mode(regs) &&
139			    !search_exception_tables(instruction_pointer(regs)))
140				goto no_context;
141		}
142	}
143
144	vma = find_vma(mm, addr);
145
146	if (unlikely(!vma))
147		goto bad_area;
148
149	if (vma->vm_start <= addr)
150		goto good_area;
151
152	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
153		goto bad_area;
154
155	if (unlikely(expand_stack(vma, addr)))
156		goto bad_area;
157
158	/*
159	 * Ok, we have a good vm_area for this memory access, so
160	 * we can handle it..
161	 */
162
163good_area:
164	info.si_code = SEGV_ACCERR;
165
166	/* first do some preliminary protection checks */
167	if (entry == ENTRY_PTE_NOT_PRESENT) {
168		if (error_code & ITYPE_mskINST)
169			mask = VM_EXEC;
170		else {
171			mask = VM_READ | VM_WRITE;
172			if (vma->vm_flags & VM_WRITE)
173				flags |= FAULT_FLAG_WRITE;
174		}
175	} else if (entry == ENTRY_TLB_MISC) {
176		switch (error_code & ITYPE_mskETYPE) {
177		case RD_PROT:
178			mask = VM_READ;
179			break;
180		case WRT_PROT:
181			mask = VM_WRITE;
182			flags |= FAULT_FLAG_WRITE;
183			break;
184		case NOEXEC:
185			mask = VM_EXEC;
186			break;
187		case PAGE_MODIFY:
188			mask = VM_WRITE;
189			flags |= FAULT_FLAG_WRITE;
190			break;
191		case ACC_BIT:
192			BUG();
193		default:
194			break;
195		}
196
197	}
198	if (!(vma->vm_flags & mask))
199		goto bad_area;
200
201	/*
202	 * If for any reason at all we couldn't handle the fault,
203	 * make sure we exit gracefully rather than endlessly redo
204	 * the fault.
205	 */
206
207	fault = handle_mm_fault(vma, addr, flags);
208
209	/*
210	 * If we need to retry but a fatal signal is pending, handle the
211	 * signal first. We do not need to release the mmap_sem because it
212	 * would already be released in __lock_page_or_retry in mm/filemap.c.
213	 */
214	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
215		if (!user_mode(regs))
216			goto no_context;
217		return;
218	}
219
220	if (unlikely(fault & VM_FAULT_ERROR)) {
221		if (fault & VM_FAULT_OOM)
222			goto out_of_memory;
223		else if (fault & VM_FAULT_SIGBUS)
224			goto do_sigbus;
225		else
226			goto bad_area;
227	}
228
229	/*
230	 * Major/minor page fault accounting is only done on the initial
231	 * attempt. If we go through a retry, it is extremely likely that the
232	 * page will be found in page cache at that point.
233	 */
234	if (flags & FAULT_FLAG_ALLOW_RETRY) {
235		if (fault & VM_FAULT_MAJOR)
236			tsk->maj_flt++;
237		else
238			tsk->min_flt++;
239		if (fault & VM_FAULT_RETRY) {
240			flags &= ~FAULT_FLAG_ALLOW_RETRY;
241			flags |= FAULT_FLAG_TRIED;
242
243			/* No need to up_read(&mm->mmap_sem) as we would
244			 * have already released it in __lock_page_or_retry
245			 * in mm/filemap.c.
246			 */
247			goto retry;
248		}
249	}
250
251	up_read(&mm->mmap_sem);
252	return;
253
254	/*
255	 * Something tried to access memory that isn't in our memory map..
256	 * Fix it, but check if it's kernel or user first..
257	 */
258bad_area:
259	up_read(&mm->mmap_sem);
260
261bad_area_nosemaphore:
262
263	/* User mode accesses just cause a SIGSEGV */
264
265	if (user_mode(regs)) {
266		tsk->thread.address = addr;
267		tsk->thread.error_code = error_code;
268		tsk->thread.trap_no = entry;
269		info.si_signo = SIGSEGV;
270		info.si_errno = 0;
271		/* info.si_code has been set above */
272		info.si_addr = (void *)addr;
273		force_sig_info(SIGSEGV, &info, tsk);
274		return;
275	}
276
277no_context:
278
279	/* Are we prepared to handle this kernel fault?
280	 *
281	 * (The kernel has valid exception-points in the source
282	 *  when it accesses user-memory. When it fails in one
283	 *  of those points, we find it in a table and do a jump
284	 *  to some fixup code that loads an appropriate error
285	 *  code)
286	 */
287
288	{
289		const struct exception_table_entry *entry;
290
291		if ((entry =
292		     search_exception_tables(instruction_pointer(regs))) !=
293		    NULL) {
294			/* Adjust the instruction pointer in the stackframe */
295			instruction_pointer(regs) = entry->fixup;
296			return;
297		}
298	}
299
300	/*
301	 * Oops. The kernel tried to access some bad page. We'll have to
302	 * terminate things with extreme prejudice.
303	 */
304
305	bust_spinlocks(1);
306	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
307		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
308		 "paging request", addr);
309
310	show_pte(mm, addr);
311	die("Oops", regs, error_code);
312	bust_spinlocks(0);
313	do_exit(SIGKILL);
314
315	return;
316
317	/*
318	 * We ran out of memory, or some other thing happened to us that made
319	 * us unable to handle the page fault gracefully.
320	 */
321
322out_of_memory:
323	up_read(&mm->mmap_sem);
324	if (!user_mode(regs))
325		goto no_context;
326	pagefault_out_of_memory();
327	return;
328
329do_sigbus:
330	up_read(&mm->mmap_sem);
331
332	/* Kernel mode? Handle exceptions or die */
333	if (!user_mode(regs))
334		goto no_context;
335
336	/*
337	 * Send a sigbus
338	 */
339	tsk->thread.address = addr;
340	tsk->thread.error_code = error_code;
341	tsk->thread.trap_no = entry;
342	info.si_signo = SIGBUS;
343	info.si_errno = 0;
344	info.si_code = BUS_ADRERR;
345	info.si_addr = (void *)addr;
346	force_sig_info(SIGBUS, &info, tsk);
347
348	return;
349
350vmalloc_fault:
351	{
352		/*
353		 * Synchronize this task's top level page-table
354		 * with the 'reference' page table.
355		 *
356		 * Use current_pgd instead of tsk->active_mm->pgd
357		 * since the latter might be unavailable if this
358		 * code is executed in a misfortunately run irq
359		 * (like inside schedule() between switch_mm and
360		 *  switch_to...).
361		 */
362
363		unsigned int index = pgd_index(addr);
364		pgd_t *pgd, *pgd_k;
365		pud_t *pud, *pud_k;
366		pmd_t *pmd, *pmd_k;
367		pte_t *pte_k;
368
369		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
370		pgd_k = init_mm.pgd + index;
371
372		if (!pgd_present(*pgd_k))
373			goto no_context;
374
375		pud = pud_offset(pgd, addr);
376		pud_k = pud_offset(pgd_k, addr);
377		if (!pud_present(*pud_k))
378			goto no_context;
379
380		pmd = pmd_offset(pud, addr);
381		pmd_k = pmd_offset(pud_k, addr);
382		if (!pmd_present(*pmd_k))
383			goto no_context;
384
385		if (!pmd_present(*pmd))
386			set_pmd(pmd, *pmd_k);
387		else
388			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
389
390		/*
391		 * Since the vmalloc area is global, we don't
392		 * need to copy individual PTE's, it is enough to
393		 * copy the pgd pointer into the pte page of the
394		 * root task. If that is there, we'll find our pte if
395		 * it exists.
396		 */
397
398		/* Make sure the actual PTE exists as well to
399		 * catch kernel vmalloc-area accesses to non-mapped
400		 * addresses. If we don't do this, this will just
401		 * silently loop forever.
402		 */
403
404		pte_k = pte_offset_kernel(pmd_k, addr);
405		if (!pte_present(*pte_k))
406			goto no_context;
407
408		return;
409	}
410}
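
Both versions reach the do_sigbus label when handle_mm_fault() returns VM_FAULT_SIGBUS; v4.17 fills a siginfo_t by hand and calls force_sig_info(), while v5.4 uses the force_sig_fault() helper, but in either case user space receives SIGBUS with si_code BUS_ADRERR. A common way to drive a fault handler down that path is touching a shared file mapping beyond the end of the file. The sketch below is a hedged, architecture-independent illustration; the temporary file name and the one-page/two-page sizes are arbitrary demo choices.

#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	if (info->si_code == BUS_ADRERR)	/* the do_sigbus case above */
		write(STDOUT_FILENO, "BUS_ADRERR\n", 11);
	_exit(0);
}

int main(void)
{
	struct sigaction sa;
	long page = sysconf(_SC_PAGESIZE);
	int fd;
	char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);

	fd = open("/tmp/sigbus-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;
	/* map two pages of a one-page file */
	p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* backed by the file: handled as an ordinary fault */
	p[page] = 1;	/* past EOF: VM_FAULT_SIGBUS, hence SIGBUS/BUS_ADRERR */
	return 0;
}

The second store should terminate the process through the handler, which reports BUS_ADRERR before calling _exit().
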