arch/um/kernel/tlb.c (v4.17)
 
  1/*
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 */
  5
  6#include <linux/mm.h>
  7#include <linux/module.h>
  8#include <linux/sched/signal.h>
  9
 10#include <asm/pgtable.h>
 11#include <asm/tlbflush.h>
 12#include <as-layout.h>
 13#include <mem_user.h>
 14#include <os.h>
 15#include <skas.h>
 16#include <kern_util.h>
 17
 18struct host_vm_change {
 19	struct host_vm_op {
 20		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
 21		union {
 22			struct {
 23				unsigned long addr;
 24				unsigned long len;
 25				unsigned int prot;
 26				int fd;
 27				__u64 offset;
 28			} mmap;
 29			struct {
 30				unsigned long addr;
 31				unsigned long len;
 32			} munmap;
 33			struct {
 34				unsigned long addr;
 35				unsigned long len;
 36				unsigned int prot;
 37			} mprotect;
 38		} u;
 39	} ops[1];
 40	int index;
 41	struct mm_id *id;
 42	void *data;
 43	int force;
 44};
 45
 46#define INIT_HVC(mm, force) \
 47	((struct host_vm_change) \
 48	 { .ops		= { { .type = NONE } },	\
 49	   .id		= &mm->context.id, \
 50       	   .data	= NULL, \
 51	   .index	= 0, \
 52	   .force	= force })
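The batch above holds a single pending host_vm_op (ops[1]), so any new change that cannot be coalesced pushes the queued one out through do_ops(). INIT_HVC resets the whole tracking struct with a C99 compound literal and designated initializers; a minimal userspace sketch of the same reset idiom (the struct and macro names below are illustrative, not the kernel's):

#include <stdio.h>

struct batch {
	enum { IDLE, PENDING } state;	/* stand-in for host_vm_op.type */
	int index;
	int force;
};

/* Build a known-empty batch in one expression, as INIT_HVC does. */
#define INIT_BATCH(force) ((struct batch){ .state = IDLE, .index = 0, .force = (force) })

int main(void)
{
	struct batch b = INIT_BATCH(1);

	printf("state=%d index=%d force=%d\n", b.state, b.index, b.force);
	return 0;
}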
 53
 54static void report_enomem(void)
 55{
 56	printk(KERN_ERR "UML ran out of memory on the host side! "
 57			"This can happen due to a memory limitation or "
 58			"vm.max_map_count has been reached.\n");
 59}
 60
 61static int do_ops(struct host_vm_change *hvc, int end,
 62		  int finished)
 63{
 64	struct host_vm_op *op;
 65	int i, ret = 0;
 66
 67	for (i = 0; i < end && !ret; i++) {
 68		op = &hvc->ops[i];
 69		switch (op->type) {
 70		case MMAP:
 71			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
 72				  op->u.mmap.prot, op->u.mmap.fd,
 73				  op->u.mmap.offset, finished, &hvc->data);
 74			break;
 75		case MUNMAP:
 76			ret = unmap(hvc->id, op->u.munmap.addr,
 77				    op->u.munmap.len, finished, &hvc->data);
 78			break;
 79		case MPROTECT:
 80			ret = protect(hvc->id, op->u.mprotect.addr,
 81				      op->u.mprotect.len, op->u.mprotect.prot,
 82				      finished, &hvc->data);
 83			break;
 84		default:
 85			printk(KERN_ERR "Unknown op type %d in do_ops\n",
 86			       op->type);
 87			BUG();
 88			break;
 89		}
 90	}
 91
 92	if (ret == -ENOMEM)
 93		report_enomem();
 94
 95	return ret;
 96}
 97
 98static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 99		    unsigned int prot, struct host_vm_change *hvc)
100{
101	__u64 offset;
102	struct host_vm_op *last;
103	int fd, ret = 0;
104
105	fd = phys_mapping(phys, &offset);
106	if (hvc->index != 0) {
107		last = &hvc->ops[hvc->index - 1];
108		if ((last->type == MMAP) &&
109		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
110		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
111		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
112			last->u.mmap.len += len;
113			return 0;
114		}
115	}
116
117	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
118		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
119		hvc->index = 0;
120	}
121
122	hvc->ops[hvc->index++] = ((struct host_vm_op)
123				  { .type	= MMAP,
124				    .u = { .mmap = { .addr	= virt,
125						     .len	= len,
126						     .prot	= prot,
127						     .fd	= fd,
128						     .offset	= offset }
129			   } });
130	return ret;
131}
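add_mmap() queues a new MMAP op only when it cannot grow the previous one: the new page must start exactly where the last mapping ends, with the same protection and descriptor, at the contiguous file offset. A standalone sketch of that coalescing test on plain ranges (extent/try_extend are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct extent {
	unsigned long addr;
	unsigned long len;
	unsigned int prot;
};

/* Extend 'last' by 'len' when the new range begins where it ends and
 * carries the same protection, mirroring the merge check in add_mmap(). */
static bool try_extend(struct extent *last, unsigned long addr,
		       unsigned long len, unsigned int prot)
{
	if (last->addr + last->len == addr && last->prot == prot) {
		last->len += len;
		return true;
	}
	return false;
}

int main(void)
{
	struct extent e = { .addr = 0x1000, .len = 0x1000, .prot = 5 };

	printf("merged=%d len=0x%lx\n",
	       try_extend(&e, 0x2000, 0x1000, 5), e.len);
	return 0;
}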
132
133static int add_munmap(unsigned long addr, unsigned long len,
134		      struct host_vm_change *hvc)
135{
136	struct host_vm_op *last;
137	int ret = 0;
138
139	if ((addr >= STUB_START) && (addr < STUB_END))
140		return -EINVAL;
141
142	if (hvc->index != 0) {
143		last = &hvc->ops[hvc->index - 1];
144		if ((last->type == MUNMAP) &&
145		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
146			last->u.munmap.len += len;
147			return 0;
148		}
149	}
150
151	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
152		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
153		hvc->index = 0;
154	}
155
156	hvc->ops[hvc->index++] = ((struct host_vm_op)
157				  { .type	= MUNMAP,
158			     	    .u = { .munmap = { .addr	= addr,
159						       .len	= len } } });
160	return ret;
161}
162
163static int add_mprotect(unsigned long addr, unsigned long len,
164			unsigned int prot, struct host_vm_change *hvc)
165{
166	struct host_vm_op *last;
167	int ret = 0;
168
169	if (hvc->index != 0) {
170		last = &hvc->ops[hvc->index - 1];
171		if ((last->type == MPROTECT) &&
172		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
173		   (last->u.mprotect.prot == prot)) {
174			last->u.mprotect.len += len;
175			return 0;
176		}
177	}
178
179	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
180		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
181		hvc->index = 0;
182	}
183
184	hvc->ops[hvc->index++] = ((struct host_vm_op)
185				  { .type	= MPROTECT,
186			     	    .u = { .mprotect = { .addr	= addr,
187							 .len	= len,
188							 .prot	= prot } } });
189	return ret;
190}
191
192#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
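ADD_ROUND(n, inc) advances n past the end of its current inc-sized slot, assuming inc is a power of two: add inc, then clear the low bits. For example ADD_ROUND(0x1234, 0x1000) is 0x2000, and an already-aligned 0x1000 also becomes 0x2000; the kernel-range flush below uses this to jump to the next PGD/PUD/PMD boundary when an entry is absent. A quick check:

#include <assert.h>
#include <stdio.h>

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

int main(void)
{
	assert(ADD_ROUND(0x1234UL, 0x1000UL) == 0x2000UL);	/* into the next slot */
	assert(ADD_ROUND(0x1000UL, 0x1000UL) == 0x2000UL);	/* aligned input still advances */
	printf("ADD_ROUND behaves as expected\n");
	return 0;
}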
193
194static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
195				   unsigned long end,
196				   struct host_vm_change *hvc)
197{
198	pte_t *pte;
199	int r, w, x, prot, ret = 0;
200
201	pte = pte_offset_kernel(pmd, addr);
202	do {
203		if ((addr >= STUB_START) && (addr < STUB_END))
204			continue;
205
206		r = pte_read(*pte);
207		w = pte_write(*pte);
208		x = pte_exec(*pte);
209		if (!pte_young(*pte)) {
210			r = 0;
211			w = 0;
212		} else if (!pte_dirty(*pte))
213			w = 0;
214
215		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
216			(x ? UM_PROT_EXEC : 0));
217		if (hvc->force || pte_newpage(*pte)) {
218			if (pte_present(*pte))
219				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
220					       PAGE_SIZE, prot, hvc);
221			else
222				ret = add_munmap(addr, PAGE_SIZE, hvc);
223		} else if (pte_newprot(*pte))
224			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
225		*pte = pte_mkuptodate(*pte);
226	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
227	return ret;
228}
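update_pte_range() turns the PTE's accessed/dirty state into host protections: a page that is not young gets no read/write access at all, and a young-but-clean page loses write access, so the first touch or first write faults and lets UML set the young/dirty bits. A self-contained sketch of that filtering (the UM_PROT_* values here are made up for illustration):

#include <stdio.h>

#define UM_PROT_READ  0x1	/* illustrative values only */
#define UM_PROT_WRITE 0x2
#define UM_PROT_EXEC  0x4

/* Mirror the young/dirty filtering in update_pte_range(). */
static int pte_to_prot(int r, int w, int x, int young, int dirty)
{
	if (!young)
		r = w = 0;	/* never accessed: fault on any access */
	else if (!dirty)
		w = 0;		/* accessed but clean: fault on first write */

	return (r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
	       (x ? UM_PROT_EXEC : 0);
}

int main(void)
{
	printf("young+dirty rwx -> %#x\n", pte_to_prot(1, 1, 1, 1, 1));	/* 0x7 */
	printf("young, clean rw -> %#x\n", pte_to_prot(1, 1, 0, 1, 0));	/* 0x1 */
	printf("not young   rwx -> %#x\n", pte_to_prot(1, 1, 1, 0, 0));	/* 0x4 */
	return 0;
}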
229
230static inline int update_pmd_range(pud_t *pud, unsigned long addr,
231				   unsigned long end,
232				   struct host_vm_change *hvc)
233{
234	pmd_t *pmd;
235	unsigned long next;
236	int ret = 0;
237
238	pmd = pmd_offset(pud, addr);
239	do {
240		next = pmd_addr_end(addr, end);
241		if (!pmd_present(*pmd)) {
242			if (hvc->force || pmd_newpage(*pmd)) {
243				ret = add_munmap(addr, next - addr, hvc);
244				pmd_mkuptodate(*pmd);
245			}
246		}
247		else ret = update_pte_range(pmd, addr, next, hvc);
248	} while (pmd++, addr = next, ((addr < end) && !ret));
249	return ret;
250}
251
252static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
253				   unsigned long end,
254				   struct host_vm_change *hvc)
255{
256	pud_t *pud;
257	unsigned long next;
258	int ret = 0;
259
260	pud = pud_offset(pgd, addr);
261	do {
262		next = pud_addr_end(addr, end);
263		if (!pud_present(*pud)) {
264			if (hvc->force || pud_newpage(*pud)) {
265				ret = add_munmap(addr, next - addr, hvc);
266				pud_mkuptodate(*pud);
267			}
268		}
269		else ret = update_pmd_range(pud, addr, next, hvc);
270	} while (pud++, addr = next, ((addr < end) && !ret));
271	return ret;
272}
273
274void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
275		      unsigned long end_addr, int force)
276{
277	pgd_t *pgd;
278	struct host_vm_change hvc;
279	unsigned long addr = start_addr, next;
280	int ret = 0;
281
282	hvc = INIT_HVC(mm, force);
283	pgd = pgd_offset(mm, addr);
284	do {
285		next = pgd_addr_end(addr, end_addr);
286		if (!pgd_present(*pgd)) {
287			if (force || pgd_newpage(*pgd)) {
288				ret = add_munmap(addr, next - addr, &hvc);
289				pgd_mkuptodate(*pgd);
290			}
291		}
292		else ret = update_pud_range(pgd, addr, next, &hvc);
293	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
294
295	if (!ret)
296		ret = do_ops(&hvc, hvc.index, 1);
297
298	/* This is not an else because ret is modified above */
299	if (ret) {
300		printk(KERN_ERR "fix_range_common: failed, killing current "
301		       "process: %d\n", task_tgid_vnr(current));
302		/* We are under mmap_sem, release it such that current can terminate */
303		up_write(&current->mm->mmap_sem);
304		force_sig(SIGKILL, current);
305		do_signal(&current->thread.regs);
306	}
307}
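fix_range_common() walks the range one top-level entry at a time: pgd_addr_end(addr, end_addr) returns whichever is smaller, the next PGD boundary above addr or end_addr, so each lower level only ever sees addresses inside the current entry, and the final do_ops(..., 1) flushes whatever is still queued. A generic sketch of that boundary clamping (slot_end is an illustrative stand-in for pgd_addr_end, ignoring the overflow handling the real helpers do):

#include <stdio.h>

#define SLOT_SIZE 0x1000UL	/* stand-in for PGDIR_SIZE */

/* Lesser of the next slot boundary above addr and end. */
static unsigned long slot_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + SLOT_SIZE) & ~(SLOT_SIZE - 1);

	return boundary < end ? boundary : end;
}

int main(void)
{
	printf("%#lx\n", slot_end(0x1234, 0x10000));	/* 0x2000: next boundary */
	printf("%#lx\n", slot_end(0x1234, 0x1800));	/* 0x1800: clamped to end */
	return 0;
}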
308
309static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
310{
311	struct mm_struct *mm;
312	pgd_t *pgd;
313	pud_t *pud;
314	pmd_t *pmd;
315	pte_t *pte;
316	unsigned long addr, last;
317	int updated = 0, err;
318
319	mm = &init_mm;
320	for (addr = start; addr < end;) {
321		pgd = pgd_offset(mm, addr);
322		if (!pgd_present(*pgd)) {
323			last = ADD_ROUND(addr, PGDIR_SIZE);
324			if (last > end)
325				last = end;
326			if (pgd_newpage(*pgd)) {
327				updated = 1;
328				err = os_unmap_memory((void *) addr,
329						      last - addr);
330				if (err < 0)
331					panic("munmap failed, errno = %d\n",
332					      -err);
333			}
334			addr = last;
335			continue;
336		}
337
338		pud = pud_offset(pgd, addr);
339		if (!pud_present(*pud)) {
340			last = ADD_ROUND(addr, PUD_SIZE);
341			if (last > end)
342				last = end;
343			if (pud_newpage(*pud)) {
344				updated = 1;
345				err = os_unmap_memory((void *) addr,
346						      last - addr);
347				if (err < 0)
348					panic("munmap failed, errno = %d\n",
349					      -err);
350			}
351			addr = last;
352			continue;
353		}
354
355		pmd = pmd_offset(pud, addr);
356		if (!pmd_present(*pmd)) {
357			last = ADD_ROUND(addr, PMD_SIZE);
358			if (last > end)
359				last = end;
360			if (pmd_newpage(*pmd)) {
361				updated = 1;
362				err = os_unmap_memory((void *) addr,
363						      last - addr);
364				if (err < 0)
365					panic("munmap failed, errno = %d\n",
366					      -err);
367			}
368			addr = last;
369			continue;
370		}
371
372		pte = pte_offset_kernel(pmd, addr);
373		if (!pte_present(*pte) || pte_newpage(*pte)) {
374			updated = 1;
375			err = os_unmap_memory((void *) addr,
376					      PAGE_SIZE);
377			if (err < 0)
378				panic("munmap failed, errno = %d\n",
379				      -err);
380			if (pte_present(*pte))
381				map_memory(addr,
382					   pte_val(*pte) & PAGE_MASK,
383					   PAGE_SIZE, 1, 1, 1);
384		}
385		else if (pte_newprot(*pte)) {
386			updated = 1;
387			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
388		}
389		addr += PAGE_SIZE;
390	}
391	return updated;
392}
393
394void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
395{
396	pgd_t *pgd;
397	pud_t *pud;
398	pmd_t *pmd;
399	pte_t *pte;
400	struct mm_struct *mm = vma->vm_mm;
401	void *flush = NULL;
402	int r, w, x, prot, err = 0;
403	struct mm_id *mm_id;
404
405	address &= PAGE_MASK;
406	pgd = pgd_offset(mm, address);
407	if (!pgd_present(*pgd))
408		goto kill;
409
410	pud = pud_offset(pgd, address);
411	if (!pud_present(*pud))
412		goto kill;
413
414	pmd = pmd_offset(pud, address);
415	if (!pmd_present(*pmd))
416		goto kill;
417
418	pte = pte_offset_kernel(pmd, address);
419
420	r = pte_read(*pte);
421	w = pte_write(*pte);
422	x = pte_exec(*pte);
423	if (!pte_young(*pte)) {
424		r = 0;
425		w = 0;
426	} else if (!pte_dirty(*pte)) {
427		w = 0;
428	}
429
430	mm_id = &mm->context.id;
431	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
432		(x ? UM_PROT_EXEC : 0));
433	if (pte_newpage(*pte)) {
434		if (pte_present(*pte)) {
435			unsigned long long offset;
436			int fd;
437
438			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
439			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
440				  1, &flush);
441		}
442		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
443	}
444	else if (pte_newprot(*pte))
445		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
446
447	if (err) {
448		if (err == -ENOMEM)
449			report_enomem();
450
451		goto kill;
452	}
453
454	*pte = pte_mkuptodate(*pte);
455
456	return;
457
458kill:
459	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
460	force_sig(SIGKILL, current);
461}
462
463pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
464{
465	return pgd_offset(mm, address);
466}
467
468pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
469{
470	return pud_offset(pgd, address);
471}
472
473pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
474{
475	return pmd_offset(pud, address);
476}
477
478pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
479{
480	return pte_offset_kernel(pmd, address);
481}
482
483pte_t *addr_pte(struct task_struct *task, unsigned long addr)
484{
485	pgd_t *pgd = pgd_offset(task->mm, addr);
486	pud_t *pud = pud_offset(pgd, addr);
487	pmd_t *pmd = pmd_offset(pud, addr);
488
489	return pte_offset_map(pmd, addr);
490}
491
492void flush_tlb_all(void)
493{
494	flush_tlb_mm(current->mm);
495}
496
497void flush_tlb_kernel_range(unsigned long start, unsigned long end)
498{
499	flush_tlb_kernel_range_common(start, end);
500}
501
502void flush_tlb_kernel_vm(void)
503{
504	flush_tlb_kernel_range_common(start_vm, end_vm);
505}
506
507void __flush_tlb_one(unsigned long addr)
508{
509	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
510}
511
512static void fix_range(struct mm_struct *mm, unsigned long start_addr,
513		      unsigned long end_addr, int force)
514{
515	fix_range_common(mm, start_addr, end_addr, force);
516}
517
518void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
519		     unsigned long end)
520{
521	if (vma->vm_mm == NULL)
522		flush_tlb_kernel_range_common(start, end);
523	else fix_range(vma->vm_mm, start, end, 0);
524}
525EXPORT_SYMBOL(flush_tlb_range);
526
527void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
528			unsigned long end)
529{
530	/*
531	 * Don't bother flushing if this address space is about to be
532	 * destroyed.
533	 */
534	if (atomic_read(&mm->mm_users) == 0)
535		return;
536
537	fix_range(mm, start, end, 0);
538}
539
540void flush_tlb_mm(struct mm_struct *mm)
541{
542	struct vm_area_struct *vma = mm->mmap;
543
544	while (vma != NULL) {
545		fix_range(mm, vma->vm_start, vma->vm_end, 0);
546		vma = vma->vm_next;
547	}
548}
549
550void force_flush_all(void)
551{
552	struct mm_struct *mm = current->mm;
553	struct vm_area_struct *vma = mm->mmap;
554
555	while (vma != NULL) {
556		fix_range(mm, vma->vm_start, vma->vm_end, 1);
557		vma = vma->vm_next;
558	}
559}
arch/um/kernel/tlb.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  4 */
  5
  6#include <linux/mm.h>
  7#include <linux/module.h>
  8#include <linux/sched/signal.h>
  9
 10#include <asm/tlbflush.h>
 11#include <as-layout.h>
 12#include <mem_user.h>
 13#include <os.h>
 14#include <skas.h>
 15#include <kern_util.h>
 16
 17struct host_vm_change {
 18	struct host_vm_op {
 19		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
 20		union {
 21			struct {
 22				unsigned long addr;
 23				unsigned long len;
 24				unsigned int prot;
 25				int fd;
 26				__u64 offset;
 27			} mmap;
 28			struct {
 29				unsigned long addr;
 30				unsigned long len;
 31			} munmap;
 32			struct {
 33				unsigned long addr;
 34				unsigned long len;
 35				unsigned int prot;
 36			} mprotect;
 37		} u;
 38	} ops[1];
 39	int userspace;
 40	int index;
 41	struct mm_struct *mm;
 42	void *data;
 43	int force;
 44};
 45
 46#define INIT_HVC(mm, force, userspace) \
 47	((struct host_vm_change) \
 48	 { .ops		= { { .type = NONE } },	\
 49	   .mm		= mm, \
 50       	   .data	= NULL, \
 51	   .userspace	= userspace, \
 52	   .index	= 0, \
 53	   .force	= force })
 54
 55static void report_enomem(void)
 56{
 57	printk(KERN_ERR "UML ran out of memory on the host side! "
 58			"This can happen due to a memory limitation or "
 59			"vm.max_map_count has been reached.\n");
 60}
 61
 62static int do_ops(struct host_vm_change *hvc, int end,
 63		  int finished)
 64{
 65	struct host_vm_op *op;
 66	int i, ret = 0;
 67
 68	for (i = 0; i < end && !ret; i++) {
 69		op = &hvc->ops[i];
 70		switch (op->type) {
 71		case MMAP:
 72			if (hvc->userspace)
 73				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
 74					  op->u.mmap.len, op->u.mmap.prot,
 75					  op->u.mmap.fd,
 76					  op->u.mmap.offset, finished,
 77					  &hvc->data);
 78			else
 79				map_memory(op->u.mmap.addr, op->u.mmap.offset,
 80					   op->u.mmap.len, 1, 1, 1);
 81			break;
 82		case MUNMAP:
 83			if (hvc->userspace)
 84				ret = unmap(&hvc->mm->context.id,
 85					    op->u.munmap.addr,
 86					    op->u.munmap.len, finished,
 87					    &hvc->data);
 88			else
 89				ret = os_unmap_memory(
 90					(void *) op->u.munmap.addr,
 91						      op->u.munmap.len);
 92
 93			break;
 94		case MPROTECT:
 95			if (hvc->userspace)
 96				ret = protect(&hvc->mm->context.id,
 97					      op->u.mprotect.addr,
 98					      op->u.mprotect.len,
 99					      op->u.mprotect.prot,
100					      finished, &hvc->data);
101			else
102				ret = os_protect_memory(
103					(void *) op->u.mprotect.addr,
104							op->u.mprotect.len,
105							1, 1, 1);
106			break;
107		default:
108			printk(KERN_ERR "Unknown op type %d in do_ops\n",
109			       op->type);
110			BUG();
111			break;
112		}
113	}
114
115	if (ret == -ENOMEM)
116		report_enomem();
117
118	return ret;
119}
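Unlike the v4.17 version above, this do_ops() checks hvc->userspace: operations on a user address space go to the stub process through map()/unmap()/protect(), while operations on the kernel's own init_mm are applied directly with map_memory(), os_unmap_memory() and os_protect_memory(). A minimal sketch of selecting a backend with such a flag (remote_unmap/local_unmap are illustrative stand-ins, not kernel functions):

#include <stdio.h>

static int remote_unmap(unsigned long addr, unsigned long len)
{
	printf("remote: unmap %#lx..%#lx\n", addr, addr + len);
	return 0;
}

static int local_unmap(unsigned long addr, unsigned long len)
{
	printf("local:  unmap %#lx..%#lx\n", addr, addr + len);
	return 0;
}

/* Pick the backend by flag, as do_ops() does with hvc->userspace. */
static int apply_unmap(int userspace, unsigned long addr, unsigned long len)
{
	return userspace ? remote_unmap(addr, len) : local_unmap(addr, len);
}

int main(void)
{
	apply_unmap(1, 0x1000, 0x1000);
	apply_unmap(0, 0x2000, 0x1000);
	return 0;
}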
120
121static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
122		    unsigned int prot, struct host_vm_change *hvc)
123{
124	__u64 offset;
125	struct host_vm_op *last;
126	int fd = -1, ret = 0;
127
128	if (hvc->userspace)
129		fd = phys_mapping(phys, &offset);
130	else
131		offset = phys;
132	if (hvc->index != 0) {
133		last = &hvc->ops[hvc->index - 1];
134		if ((last->type == MMAP) &&
135		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
136		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
137		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
138			last->u.mmap.len += len;
139			return 0;
140		}
141	}
142
143	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
144		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
145		hvc->index = 0;
146	}
147
148	hvc->ops[hvc->index++] = ((struct host_vm_op)
149				  { .type	= MMAP,
150				    .u = { .mmap = { .addr	= virt,
151						     .len	= len,
152						     .prot	= prot,
153						     .fd	= fd,
154						     .offset	= offset }
155			   } });
156	return ret;
157}
158
159static int add_munmap(unsigned long addr, unsigned long len,
160		      struct host_vm_change *hvc)
161{
162	struct host_vm_op *last;
163	int ret = 0;
164
165	if (hvc->index != 0) {
166		last = &hvc->ops[hvc->index - 1];
167		if ((last->type == MUNMAP) &&
168		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
169			last->u.munmap.len += len;
170			return 0;
171		}
172	}
173
174	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
175		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
176		hvc->index = 0;
177	}
178
179	hvc->ops[hvc->index++] = ((struct host_vm_op)
180				  { .type	= MUNMAP,
181			     	    .u = { .munmap = { .addr	= addr,
182						       .len	= len } } });
183	return ret;
184}
185
186static int add_mprotect(unsigned long addr, unsigned long len,
187			unsigned int prot, struct host_vm_change *hvc)
188{
189	struct host_vm_op *last;
190	int ret = 0;
191
192	if (hvc->index != 0) {
193		last = &hvc->ops[hvc->index - 1];
194		if ((last->type == MPROTECT) &&
195		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
196		   (last->u.mprotect.prot == prot)) {
197			last->u.mprotect.len += len;
198			return 0;
199		}
200	}
201
202	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
203		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
204		hvc->index = 0;
205	}
206
207	hvc->ops[hvc->index++] = ((struct host_vm_op)
208				  { .type	= MPROTECT,
209			     	    .u = { .mprotect = { .addr	= addr,
210							 .len	= len,
211							 .prot	= prot } } });
212	return ret;
213}
214
215#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
216
217static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
218				   unsigned long end,
219				   struct host_vm_change *hvc)
220{
221	pte_t *pte;
222	int r, w, x, prot, ret = 0;
223
224	pte = pte_offset_kernel(pmd, addr);
225	do {
226		r = pte_read(*pte);
227		w = pte_write(*pte);
228		x = pte_exec(*pte);
229		if (!pte_young(*pte)) {
230			r = 0;
231			w = 0;
232		} else if (!pte_dirty(*pte))
233			w = 0;
234
235		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
236			(x ? UM_PROT_EXEC : 0));
237		if (hvc->force || pte_newpage(*pte)) {
238			if (pte_present(*pte)) {
239				if (pte_newpage(*pte))
240					ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
241						       PAGE_SIZE, prot, hvc);
242			} else
243				ret = add_munmap(addr, PAGE_SIZE, hvc);
244		} else if (pte_newprot(*pte))
245			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
246		*pte = pte_mkuptodate(*pte);
247	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
248	return ret;
249}
250
251static inline int update_pmd_range(pud_t *pud, unsigned long addr,
252				   unsigned long end,
253				   struct host_vm_change *hvc)
254{
255	pmd_t *pmd;
256	unsigned long next;
257	int ret = 0;
258
259	pmd = pmd_offset(pud, addr);
260	do {
261		next = pmd_addr_end(addr, end);
262		if (!pmd_present(*pmd)) {
263			if (hvc->force || pmd_newpage(*pmd)) {
264				ret = add_munmap(addr, next - addr, hvc);
265				pmd_mkuptodate(*pmd);
266			}
267		}
268		else ret = update_pte_range(pmd, addr, next, hvc);
269	} while (pmd++, addr = next, ((addr < end) && !ret));
270	return ret;
271}
272
273static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
274				   unsigned long end,
275				   struct host_vm_change *hvc)
276{
277	pud_t *pud;
278	unsigned long next;
279	int ret = 0;
280
281	pud = pud_offset(p4d, addr);
282	do {
283		next = pud_addr_end(addr, end);
284		if (!pud_present(*pud)) {
285			if (hvc->force || pud_newpage(*pud)) {
286				ret = add_munmap(addr, next - addr, hvc);
287				pud_mkuptodate(*pud);
288			}
289		}
290		else ret = update_pmd_range(pud, addr, next, hvc);
291	} while (pud++, addr = next, ((addr < end) && !ret));
292	return ret;
293}
294
295static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
296				   unsigned long end,
297				   struct host_vm_change *hvc)
298{
299	p4d_t *p4d;
300	unsigned long next;
301	int ret = 0;
302
303	p4d = p4d_offset(pgd, addr);
304	do {
305		next = p4d_addr_end(addr, end);
306		if (!p4d_present(*p4d)) {
307			if (hvc->force || p4d_newpage(*p4d)) {
308				ret = add_munmap(addr, next - addr, hvc);
309				p4d_mkuptodate(*p4d);
310			}
311		} else
312			ret = update_pud_range(p4d, addr, next, hvc);
313	} while (p4d++, addr = next, ((addr < end) && !ret));
314	return ret;
315}
316
317void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
318		      unsigned long end_addr, int force)
319{
320	pgd_t *pgd;
321	struct host_vm_change hvc;
322	unsigned long addr = start_addr, next;
323	int ret = 0, userspace = 1;
324
325	hvc = INIT_HVC(mm, force, userspace);
326	pgd = pgd_offset(mm, addr);
327	do {
328		next = pgd_addr_end(addr, end_addr);
329		if (!pgd_present(*pgd)) {
330			if (force || pgd_newpage(*pgd)) {
331				ret = add_munmap(addr, next - addr, &hvc);
332				pgd_mkuptodate(*pgd);
333			}
334		} else
335			ret = update_p4d_range(pgd, addr, next, &hvc);
336	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
337
338	if (!ret)
339		ret = do_ops(&hvc, hvc.index, 1);
340
341	/* This is not an else because ret is modified above */
342	if (ret) {
343		struct mm_id *mm_idp = &current->mm->context.id;
344
345		printk(KERN_ERR "fix_range_common: failed, killing current "
346		       "process: %d\n", task_tgid_vnr(current));
347		mm_idp->kill = 1;
348	}
349}
350
351static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
352{
353	struct mm_struct *mm;
354	pgd_t *pgd;
355	p4d_t *p4d;
356	pud_t *pud;
357	pmd_t *pmd;
358	pte_t *pte;
359	unsigned long addr, last;
360	int updated = 0, err = 0, force = 0, userspace = 0;
361	struct host_vm_change hvc;
362
363	mm = &init_mm;
364	hvc = INIT_HVC(mm, force, userspace);
365	for (addr = start; addr < end;) {
366		pgd = pgd_offset(mm, addr);
367		if (!pgd_present(*pgd)) {
368			last = ADD_ROUND(addr, PGDIR_SIZE);
369			if (last > end)
370				last = end;
371			if (pgd_newpage(*pgd)) {
372				updated = 1;
373				err = add_munmap(addr, last - addr, &hvc);
374				if (err < 0)
375					panic("munmap failed, errno = %d\n",
376					      -err);
377			}
378			addr = last;
379			continue;
380		}
381
382		p4d = p4d_offset(pgd, addr);
383		if (!p4d_present(*p4d)) {
384			last = ADD_ROUND(addr, P4D_SIZE);
385			if (last > end)
386				last = end;
387			if (p4d_newpage(*p4d)) {
388				updated = 1;
389				err = add_munmap(addr, last - addr, &hvc);
390				if (err < 0)
391					panic("munmap failed, errno = %d\n",
392					      -err);
393			}
394			addr = last;
395			continue;
396		}
397
398		pud = pud_offset(p4d, addr);
399		if (!pud_present(*pud)) {
400			last = ADD_ROUND(addr, PUD_SIZE);
401			if (last > end)
402				last = end;
403			if (pud_newpage(*pud)) {
404				updated = 1;
405				err = add_munmap(addr, last - addr, &hvc);
406				if (err < 0)
407					panic("munmap failed, errno = %d\n",
408					      -err);
409			}
410			addr = last;
411			continue;
412		}
413
414		pmd = pmd_offset(pud, addr);
415		if (!pmd_present(*pmd)) {
416			last = ADD_ROUND(addr, PMD_SIZE);
417			if (last > end)
418				last = end;
419			if (pmd_newpage(*pmd)) {
420				updated = 1;
421				err = add_munmap(addr, last - addr, &hvc);
422				if (err < 0)
423					panic("munmap failed, errno = %d\n",
424					      -err);
425			}
426			addr = last;
427			continue;
428		}
429
430		pte = pte_offset_kernel(pmd, addr);
431		if (!pte_present(*pte) || pte_newpage(*pte)) {
432			updated = 1;
433			err = add_munmap(addr, PAGE_SIZE, &hvc);
434			if (err < 0)
435				panic("munmap failed, errno = %d\n",
436				      -err);
437			if (pte_present(*pte))
438				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
439					       PAGE_SIZE, 0, &hvc);
440		}
441		else if (pte_newprot(*pte)) {
442			updated = 1;
443			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
444		}
445		addr += PAGE_SIZE;
446	}
447	if (!err)
448		err = do_ops(&hvc, hvc.index, 1);
449
450	if (err < 0)
451		panic("flush_tlb_kernel failed, errno = %d\n", err);
452	return updated;
453}
454
455void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
456{
457	pgd_t *pgd;
458	p4d_t *p4d;
459	pud_t *pud;
460	pmd_t *pmd;
461	pte_t *pte;
462	struct mm_struct *mm = vma->vm_mm;
463	void *flush = NULL;
464	int r, w, x, prot, err = 0;
465	struct mm_id *mm_id;
466
467	address &= PAGE_MASK;
468
469	pgd = pgd_offset(mm, address);
470	if (!pgd_present(*pgd))
471		goto kill;
472
473	p4d = p4d_offset(pgd, address);
474	if (!p4d_present(*p4d))
475		goto kill;
476
477	pud = pud_offset(p4d, address);
478	if (!pud_present(*pud))
479		goto kill;
480
481	pmd = pmd_offset(pud, address);
482	if (!pmd_present(*pmd))
483		goto kill;
484
485	pte = pte_offset_kernel(pmd, address);
486
487	r = pte_read(*pte);
488	w = pte_write(*pte);
489	x = pte_exec(*pte);
490	if (!pte_young(*pte)) {
491		r = 0;
492		w = 0;
493	} else if (!pte_dirty(*pte)) {
494		w = 0;
495	}
496
497	mm_id = &mm->context.id;
498	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
499		(x ? UM_PROT_EXEC : 0));
500	if (pte_newpage(*pte)) {
501		if (pte_present(*pte)) {
502			unsigned long long offset;
503			int fd;
504
505			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
506			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
507				  1, &flush);
508		}
509		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
510	}
511	else if (pte_newprot(*pte))
512		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
513
514	if (err) {
515		if (err == -ENOMEM)
516			report_enomem();
517
518		goto kill;
519	}
520
521	*pte = pte_mkuptodate(*pte);
522
523	return;
524
525kill:
526	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
527	force_sig(SIGKILL);
528}
529
530void flush_tlb_all(void)
531{
532	/*
533	 * Don't bother flushing if this address space is about to be
534	 * destroyed.
535	 */
536	if (atomic_read(&current->mm->mm_users) == 0)
537		return;
538
539	flush_tlb_mm(current->mm);
540}
541
542void flush_tlb_kernel_range(unsigned long start, unsigned long end)
543{
544	flush_tlb_kernel_range_common(start, end);
545}
546
547void flush_tlb_kernel_vm(void)
548{
549	flush_tlb_kernel_range_common(start_vm, end_vm);
550}
551
552void __flush_tlb_one(unsigned long addr)
553{
554	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
555}
556
557static void fix_range(struct mm_struct *mm, unsigned long start_addr,
558		      unsigned long end_addr, int force)
559{
560	/*
561	 * Don't bother flushing if this address space is about to be
562	 * destroyed.
563	 */
564	if (atomic_read(&mm->mm_users) == 0)
565		return;
566
567	fix_range_common(mm, start_addr, end_addr, force);
568}
569
570void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
571		     unsigned long end)
572{
573	if (vma->vm_mm == NULL)
574		flush_tlb_kernel_range_common(start, end);
575	else fix_range(vma->vm_mm, start, end, 0);
576}
577EXPORT_SYMBOL(flush_tlb_range);
578
579void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
580			unsigned long end)
581{
582	fix_range(mm, start, end, 0);
583}
584
585void flush_tlb_mm(struct mm_struct *mm)
586{
587	struct vm_area_struct *vma;
588	VMA_ITERATOR(vmi, mm, 0);
589
590	for_each_vma(vmi, vma)
591		fix_range(mm, vma->vm_start, vma->vm_end, 0);
592}
593
594void force_flush_all(void)
595{
596	struct mm_struct *mm = current->mm;
597	struct vm_area_struct *vma;
598	VMA_ITERATOR(vmi, mm, 0);
599
600	for_each_vma(vmi, vma)
601		fix_range(mm, vma->vm_start, vma->vm_end, 1);
602}