v3.1
  1/*
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 */
  5
  6#include <linux/mm.h>
  7#include <linux/sched.h>
  8#include <asm/pgtable.h>
  9#include <asm/tlbflush.h>
 10#include "as-layout.h"
 11#include "mem_user.h"
 12#include "os.h"
 13#include "skas.h"
 14#include "tlb.h"
 15
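/*
 * A batch of host address space changes (mmap/munmap/mprotect) destined
 * for the process identified by ->id.  The add_* helpers below merge
 * adjacent compatible requests; once ops[] is full the batch is flushed
 * with do_ops().
 */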
 16struct host_vm_change {
 17	struct host_vm_op {
 18		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
 19		union {
 20			struct {
 21				unsigned long addr;
 22				unsigned long len;
 23				unsigned int prot;
 24				int fd;
 25				__u64 offset;
 26			} mmap;
 27			struct {
 28				unsigned long addr;
 29				unsigned long len;
 30			} munmap;
 31			struct {
 32				unsigned long addr;
 33				unsigned long len;
 34				unsigned int prot;
 35			} mprotect;
 36		} u;
 37	} ops[1];
 38	int index;
 39	struct mm_id *id;
 40	void *data;
 41	int force;
 42};
 43
 44#define INIT_HVC(mm, force) \
 45	((struct host_vm_change) \
 46	 { .ops		= { { .type = NONE } },	\
 47	   .id		= &mm->context.id, \
 48       	   .data	= NULL, \
 49	   .index	= 0, \
 50	   .force	= force })
 51
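/*
 * Apply the queued operations to the host address space through the
 * os-layer map()/unmap()/protect() helpers.  "finished" marks the last
 * batch of a flush so the lower layer can complete whatever batching of
 * its own it does.
 */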
 52static int do_ops(struct host_vm_change *hvc, int end,
 53		  int finished)
 54{
 55	struct host_vm_op *op;
 56	int i, ret = 0;
 57
 58	for (i = 0; i < end && !ret; i++) {
 59		op = &hvc->ops[i];
 60		switch (op->type) {
 61		case MMAP:
 62			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
 63				  op->u.mmap.prot, op->u.mmap.fd,
 64				  op->u.mmap.offset, finished, &hvc->data);
 65			break;
 66		case MUNMAP:
 67			ret = unmap(hvc->id, op->u.munmap.addr,
 68				    op->u.munmap.len, finished, &hvc->data);
 69			break;
 70		case MPROTECT:
 71			ret = protect(hvc->id, op->u.mprotect.addr,
 72				      op->u.mprotect.len, op->u.mprotect.prot,
 73				      finished, &hvc->data);
 74			break;
 75		default:
 76			printk(KERN_ERR "Unknown op type %d in do_ops\n",
 77			       op->type);
 78			break;
 79		}
 80	}
 81
 82	return ret;
 83}
 84
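/*
 * Queue a host mmap of the physical memory backing [virt, virt + len),
 * merging it into the previous request when it is simply a contiguous
 * extension with the same protection, fd and offset.
 */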
 85static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 86		    unsigned int prot, struct host_vm_change *hvc)
 87{
 88	__u64 offset;
 89	struct host_vm_op *last;
 90	int fd, ret = 0;
 91
 92	fd = phys_mapping(phys, &offset);
 93	if (hvc->index != 0) {
 94		last = &hvc->ops[hvc->index - 1];
 95		if ((last->type == MMAP) &&
 96		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
 97		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
 98		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
 99			last->u.mmap.len += len;
100			return 0;
101		}
102	}
103
104	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
105		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
106		hvc->index = 0;
107	}
108
109	hvc->ops[hvc->index++] = ((struct host_vm_op)
110				  { .type	= MMAP,
111				    .u = { .mmap = { .addr	= virt,
112						     .len	= len,
113						     .prot	= prot,
114						     .fd	= fd,
115						     .offset	= offset }
116			   } });
117	return ret;
118}
119
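/* Queue a host munmap, merging contiguous requests into one. */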
120static int add_munmap(unsigned long addr, unsigned long len,
121		      struct host_vm_change *hvc)
122{
123	struct host_vm_op *last;
124	int ret = 0;
125
126	if (hvc->index != 0) {
127		last = &hvc->ops[hvc->index - 1];
128		if ((last->type == MUNMAP) &&
129		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
130			last->u.munmap.len += len;
131			return 0;
132		}
133	}
134
135	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
136		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
137		hvc->index = 0;
138	}
139
140	hvc->ops[hvc->index++] = ((struct host_vm_op)
141				  { .type	= MUNMAP,
142			     	    .u = { .munmap = { .addr	= addr,
143						       .len	= len } } });
144	return ret;
145}
146
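/* Queue a host mprotect, merging contiguous requests with the same prot. */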
147static int add_mprotect(unsigned long addr, unsigned long len,
148			unsigned int prot, struct host_vm_change *hvc)
149{
150	struct host_vm_op *last;
151	int ret = 0;
152
153	if (hvc->index != 0) {
154		last = &hvc->ops[hvc->index - 1];
155		if ((last->type == MPROTECT) &&
156		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
157		   (last->u.mprotect.prot == prot)) {
158			last->u.mprotect.len += len;
159			return 0;
160		}
161	}
162
163	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
164		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
165		hvc->index = 0;
166	}
167
168	hvc->ops[hvc->index++] = ((struct host_vm_op)
169				  { .type	= MPROTECT,
170			     	    .u = { .mprotect = { .addr	= addr,
171							 .len	= len,
172							 .prot	= prot } } });
173	return ret;
174}
175
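/* Advance n to the next inc-aligned boundary above it (inc is a power of two). */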
176#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
177
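/*
 * Walk the PTEs of [addr, end): compute the host protection from the
 * read/write/exec bits (downgraded for pages that are not young or not
 * dirty), queue mmap/munmap/mprotect requests for entries that need
 * updating, and mark each PTE up to date.  The stub area is skipped.
 */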
178static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
179				   unsigned long end,
180				   struct host_vm_change *hvc)
181{
182	pte_t *pte;
183	int r, w, x, prot, ret = 0;
184
185	pte = pte_offset_kernel(pmd, addr);
186	do {
187		if ((addr >= STUB_START) && (addr < STUB_END))
188			continue;
189
190		r = pte_read(*pte);
191		w = pte_write(*pte);
192		x = pte_exec(*pte);
193		if (!pte_young(*pte)) {
194			r = 0;
195			w = 0;
196		} else if (!pte_dirty(*pte))
197			w = 0;
198
199		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
200			(x ? UM_PROT_EXEC : 0));
201		if (hvc->force || pte_newpage(*pte)) {
202			if (pte_present(*pte))
203				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
204					       PAGE_SIZE, prot, hvc);
205			else
206				ret = add_munmap(addr, PAGE_SIZE, hvc);
207		} else if (pte_newprot(*pte))
208			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
209		*pte = pte_mkuptodate(*pte);
210	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
211	return ret;
212}
213
214static inline int update_pmd_range(pud_t *pud, unsigned long addr,
215				   unsigned long end,
216				   struct host_vm_change *hvc)
217{
218	pmd_t *pmd;
219	unsigned long next;
220	int ret = 0;
221
222	pmd = pmd_offset(pud, addr);
223	do {
224		next = pmd_addr_end(addr, end);
225		if (!pmd_present(*pmd)) {
226			if (hvc->force || pmd_newpage(*pmd)) {
227				ret = add_munmap(addr, next - addr, hvc);
228				pmd_mkuptodate(*pmd);
229			}
230		}
231		else ret = update_pte_range(pmd, addr, next, hvc);
232	} while (pmd++, addr = next, ((addr < end) && !ret));
233	return ret;
234}
235
236static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
237				   unsigned long end,
238				   struct host_vm_change *hvc)
239{
240	pud_t *pud;
241	unsigned long next;
242	int ret = 0;
243
244	pud = pud_offset(pgd, addr);
245	do {
246		next = pud_addr_end(addr, end);
247		if (!pud_present(*pud)) {
248			if (hvc->force || pud_newpage(*pud)) {
249				ret = add_munmap(addr, next - addr, hvc);
250				pud_mkuptodate(*pud);
251			}
252		}
253		else ret = update_pmd_range(pud, addr, next, hvc);
254	} while (pud++, addr = next, ((addr < end) && !ret));
255	return ret;
256}
257
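/*
 * Core of a userspace TLB flush: walk the page tables covering
 * [start_addr, end_addr), queue the host operations they imply and apply
 * them.  On failure the address space can no longer be trusted, so the
 * current process is killed.
 */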
258void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
259		      unsigned long end_addr, int force)
260{
261	pgd_t *pgd;
262	struct host_vm_change hvc;
263	unsigned long addr = start_addr, next;
264	int ret = 0;
265
266	hvc = INIT_HVC(mm, force);
267	pgd = pgd_offset(mm, addr);
268	do {
269		next = pgd_addr_end(addr, end_addr);
270		if (!pgd_present(*pgd)) {
271			if (force || pgd_newpage(*pgd)) {
272				ret = add_munmap(addr, next - addr, &hvc);
273				pgd_mkuptodate(*pgd);
274			}
275		}
276		else ret = update_pud_range(pgd, addr, next, &hvc);
277	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
278
279	if (!ret)
280		ret = do_ops(&hvc, hvc.index, 1);
281
282	/* This is not an else because ret is modified above */
283	if (ret) {
284		printk(KERN_ERR "fix_range_common: failed, killing current "
285		       "process\n");
286		force_sig(SIGKILL, current);
287	}
288}
289
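/*
 * Kernel-mapping counterpart: sync init_mm with the host immediately,
 * unmapping stale ranges and remapping or reprotecting pages marked as
 * changed.  Returns nonzero if anything was updated.
 */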
290int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
291{
292	struct mm_struct *mm;
293	pgd_t *pgd;
294	pud_t *pud;
295	pmd_t *pmd;
296	pte_t *pte;
297	unsigned long addr, last;
298	int updated = 0, err;
299
300	mm = &init_mm;
301	for (addr = start; addr < end;) {
302		pgd = pgd_offset(mm, addr);
303		if (!pgd_present(*pgd)) {
304			last = ADD_ROUND(addr, PGDIR_SIZE);
305			if (last > end)
306				last = end;
307			if (pgd_newpage(*pgd)) {
308				updated = 1;
309				err = os_unmap_memory((void *) addr,
310						      last - addr);
311				if (err < 0)
312					panic("munmap failed, errno = %d\n",
313					      -err);
314			}
315			addr = last;
316			continue;
317		}
318
319		pud = pud_offset(pgd, addr);
320		if (!pud_present(*pud)) {
321			last = ADD_ROUND(addr, PUD_SIZE);
322			if (last > end)
323				last = end;
324			if (pud_newpage(*pud)) {
325				updated = 1;
326				err = os_unmap_memory((void *) addr,
327						      last - addr);
328				if (err < 0)
329					panic("munmap failed, errno = %d\n",
330					      -err);
331			}
332			addr = last;
333			continue;
334		}
335
336		pmd = pmd_offset(pud, addr);
337		if (!pmd_present(*pmd)) {
338			last = ADD_ROUND(addr, PMD_SIZE);
339			if (last > end)
340				last = end;
341			if (pmd_newpage(*pmd)) {
342				updated = 1;
343				err = os_unmap_memory((void *) addr,
344						      last - addr);
345				if (err < 0)
346					panic("munmap failed, errno = %d\n",
347					      -err);
348			}
349			addr = last;
350			continue;
351		}
352
353		pte = pte_offset_kernel(pmd, addr);
354		if (!pte_present(*pte) || pte_newpage(*pte)) {
355			updated = 1;
356			err = os_unmap_memory((void *) addr,
357					      PAGE_SIZE);
358			if (err < 0)
359				panic("munmap failed, errno = %d\n",
360				      -err);
361			if (pte_present(*pte))
362				map_memory(addr,
363					   pte_val(*pte) & PAGE_MASK,
364					   PAGE_SIZE, 1, 1, 1);
365		}
366		else if (pte_newprot(*pte)) {
367			updated = 1;
368			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
369		}
370		addr += PAGE_SIZE;
371	}
372	return updated;
373}
374
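/*
 * Fix up the host mapping of a single user page immediately, without
 * batching.  Any failure kills the process.
 */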
375void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
376{
377	pgd_t *pgd;
378	pud_t *pud;
379	pmd_t *pmd;
380	pte_t *pte;
381	struct mm_struct *mm = vma->vm_mm;
382	void *flush = NULL;
383	int r, w, x, prot, err = 0;
384	struct mm_id *mm_id;
385
386	address &= PAGE_MASK;
387	pgd = pgd_offset(mm, address);
388	if (!pgd_present(*pgd))
389		goto kill;
390
391	pud = pud_offset(pgd, address);
392	if (!pud_present(*pud))
393		goto kill;
394
395	pmd = pmd_offset(pud, address);
396	if (!pmd_present(*pmd))
397		goto kill;
398
399	pte = pte_offset_kernel(pmd, address);
400
401	r = pte_read(*pte);
402	w = pte_write(*pte);
403	x = pte_exec(*pte);
404	if (!pte_young(*pte)) {
405		r = 0;
406		w = 0;
407	} else if (!pte_dirty(*pte)) {
408		w = 0;
409	}
410
411	mm_id = &mm->context.id;
412	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
413		(x ? UM_PROT_EXEC : 0));
414	if (pte_newpage(*pte)) {
415		if (pte_present(*pte)) {
416			unsigned long long offset;
417			int fd;
418
419			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
420			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
421				  1, &flush);
422		}
423		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
424	}
425	else if (pte_newprot(*pte))
426		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
427
428	if (err)
429		goto kill;
430
431	*pte = pte_mkuptodate(*pte);
432
433	return;
434
435kill:
436	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
437	force_sig(SIGKILL, current);
438}
439
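/*
 * Simple page-table accessors: thin wrappers around the generic
 * pgd/pud/pmd/pte walkers, plus addr_pte(), which returns the PTE mapping
 * addr in a task's address space.
 */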
440pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
441{
442	return pgd_offset(mm, address);
443}
444
445pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
446{
447	return pud_offset(pgd, address);
448}
449
450pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
451{
452	return pmd_offset(pud, address);
453}
454
455pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
456{
457	return pte_offset_kernel(pmd, address);
458}
459
460pte_t *addr_pte(struct task_struct *task, unsigned long addr)
461{
462	pgd_t *pgd = pgd_offset(task->mm, addr);
463	pud_t *pud = pud_offset(pgd, addr);
464	pmd_t *pmd = pmd_offset(pud, addr);
465
466	return pte_offset_map(pmd, addr);
467}
468
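/*
 * The flush_tlb_* entry points below route kernel mappings through
 * flush_tlb_kernel_range_common() and user mappings through
 * fix_range_common().
 */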
469void flush_tlb_all(void)
470{
471	flush_tlb_mm(current->mm);
472}
473
474void flush_tlb_kernel_range(unsigned long start, unsigned long end)
475{
476	flush_tlb_kernel_range_common(start, end);
477}
478
479void flush_tlb_kernel_vm(void)
480{
481	flush_tlb_kernel_range_common(start_vm, end_vm);
482}
483
484void __flush_tlb_one(unsigned long addr)
485{
486	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
487}
488
489static void fix_range(struct mm_struct *mm, unsigned long start_addr,
490		      unsigned long end_addr, int force)
491{
492	fix_range_common(mm, start_addr, end_addr, force);
493}
494
495void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
496		     unsigned long end)
497{
498	if (vma->vm_mm == NULL)
499		flush_tlb_kernel_range_common(start, end);
500	else fix_range(vma->vm_mm, start, end, 0);
501	}
502
503void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
504			unsigned long end)
505{
506	/*
507	 * Don't bother flushing if this address space is about to be
508	 * destroyed.
509	 */
510	if (atomic_read(&mm->mm_users) == 0)
511		return;
512
513	fix_range(mm, start, end, 0);
514}
515
516void flush_tlb_mm(struct mm_struct *mm)
517{
518	struct vm_area_struct *vma = mm->mmap;
519
520	while (vma != NULL) {
521		fix_range(mm, vma->vm_start, vma->vm_end, 0);
522		vma = vma->vm_next;
523	}
524}
525
526void force_flush_all(void)
527{
528	struct mm_struct *mm = current->mm;
529	struct vm_area_struct *vma = mm->mmap;
530
531	while (vma != NULL) {
532		fix_range(mm, vma->vm_start, vma->vm_end, 1);
533		vma = vma->vm_next;
534	}
535}
v4.6
  1/*
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 */
  5
  6#include <linux/mm.h>
  7#include <linux/module.h>
  8#include <linux/sched.h>
  9#include <asm/pgtable.h>
 10#include <asm/tlbflush.h>
 11#include <as-layout.h>
 12#include <mem_user.h>
 13#include <os.h>
 14#include <skas.h>
 15#include <kern_util.h>
 16
 17struct host_vm_change {
 18	struct host_vm_op {
 19		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
 20		union {
 21			struct {
 22				unsigned long addr;
 23				unsigned long len;
 24				unsigned int prot;
 25				int fd;
 26				__u64 offset;
 27			} mmap;
 28			struct {
 29				unsigned long addr;
 30				unsigned long len;
 31			} munmap;
 32			struct {
 33				unsigned long addr;
 34				unsigned long len;
 35				unsigned int prot;
 36			} mprotect;
 37		} u;
 38	} ops[1];
 39	int index;
 40	struct mm_id *id;
 41	void *data;
 42	int force;
 43};
 44
 45#define INIT_HVC(mm, force) \
 46	((struct host_vm_change) \
 47	 { .ops		= { { .type = NONE } },	\
 48	   .id		= &mm->context.id, \
 49       	   .data	= NULL, \
 50	   .index	= 0, \
 51	   .force	= force })
 52
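/*
 * Mapping calls on the host can fail with -ENOMEM, either from a genuine
 * memory shortage or because the host's vm.max_map_count limit has been
 * hit; tell the user so.
 */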
 53static void report_enomem(void)
 54{
 55	printk(KERN_ERR "UML ran out of memory on the host side! "
 56			"This can happen due to a memory limitation or "
 57			"vm.max_map_count has been reached.\n");
 58}
 59
 60static int do_ops(struct host_vm_change *hvc, int end,
 61		  int finished)
 62{
 63	struct host_vm_op *op;
 64	int i, ret = 0;
 65
 66	for (i = 0; i < end && !ret; i++) {
 67		op = &hvc->ops[i];
 68		switch (op->type) {
 69		case MMAP:
 70			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
 71				  op->u.mmap.prot, op->u.mmap.fd,
 72				  op->u.mmap.offset, finished, &hvc->data);
 73			break;
 74		case MUNMAP:
 75			ret = unmap(hvc->id, op->u.munmap.addr,
 76				    op->u.munmap.len, finished, &hvc->data);
 77			break;
 78		case MPROTECT:
 79			ret = protect(hvc->id, op->u.mprotect.addr,
 80				      op->u.mprotect.len, op->u.mprotect.prot,
 81				      finished, &hvc->data);
 82			break;
 83		default:
 84			printk(KERN_ERR "Unknown op type %d in do_ops\n",
 85			       op->type);
 86			BUG();
 87			break;
 88		}
 89	}
 90
 91	if (ret == -ENOMEM)
 92		report_enomem();
 93
 94	return ret;
 95}
 96
 97static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 98		    unsigned int prot, struct host_vm_change *hvc)
 99{
100	__u64 offset;
101	struct host_vm_op *last;
102	int fd, ret = 0;
103
104	fd = phys_mapping(phys, &offset);
105	if (hvc->index != 0) {
106		last = &hvc->ops[hvc->index - 1];
107		if ((last->type == MMAP) &&
108		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
109		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
110		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
111			last->u.mmap.len += len;
112			return 0;
113		}
114	}
115
116	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
117		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
118		hvc->index = 0;
119	}
120
121	hvc->ops[hvc->index++] = ((struct host_vm_op)
122				  { .type	= MMAP,
123				    .u = { .mmap = { .addr	= virt,
124						     .len	= len,
125						     .prot	= prot,
126						     .fd	= fd,
127						     .offset	= offset }
128			   } });
129	return ret;
130}
131
132static int add_munmap(unsigned long addr, unsigned long len,
133		      struct host_vm_change *hvc)
134{
135	struct host_vm_op *last;
136	int ret = 0;
137
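	/* Refuse to unmap the stub pages; they must stay mapped in every userspace process. */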
138	if ((addr >= STUB_START) && (addr < STUB_END))
139		return -EINVAL;
140
141	if (hvc->index != 0) {
142		last = &hvc->ops[hvc->index - 1];
143		if ((last->type == MUNMAP) &&
144		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
145			last->u.munmap.len += len;
146			return 0;
147		}
148	}
149
150	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
151		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
152		hvc->index = 0;
153	}
154
155	hvc->ops[hvc->index++] = ((struct host_vm_op)
156				  { .type	= MUNMAP,
157			     	    .u = { .munmap = { .addr	= addr,
158						       .len	= len } } });
159	return ret;
160}
161
162static int add_mprotect(unsigned long addr, unsigned long len,
163			unsigned int prot, struct host_vm_change *hvc)
164{
165	struct host_vm_op *last;
166	int ret = 0;
167
168	if (hvc->index != 0) {
169		last = &hvc->ops[hvc->index - 1];
170		if ((last->type == MPROTECT) &&
171		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
172		   (last->u.mprotect.prot == prot)) {
173			last->u.mprotect.len += len;
174			return 0;
175		}
176	}
177
178	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
179		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
180		hvc->index = 0;
181	}
182
183	hvc->ops[hvc->index++] = ((struct host_vm_op)
184				  { .type	= MPROTECT,
185			     	    .u = { .mprotect = { .addr	= addr,
186							 .len	= len,
187							 .prot	= prot } } });
188	return ret;
189}
190
191#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
192
193static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
194				   unsigned long end,
195				   struct host_vm_change *hvc)
196{
197	pte_t *pte;
198	int r, w, x, prot, ret = 0;
199
200	pte = pte_offset_kernel(pmd, addr);
201	do {
202		if ((addr >= STUB_START) && (addr < STUB_END))
203			continue;
204
205		r = pte_read(*pte);
206		w = pte_write(*pte);
207		x = pte_exec(*pte);
208		if (!pte_young(*pte)) {
209			r = 0;
210			w = 0;
211		} else if (!pte_dirty(*pte))
212			w = 0;
213
214		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
215			(x ? UM_PROT_EXEC : 0));
216		if (hvc->force || pte_newpage(*pte)) {
217			if (pte_present(*pte))
218				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
219					       PAGE_SIZE, prot, hvc);
220			else
221				ret = add_munmap(addr, PAGE_SIZE, hvc);
222		} else if (pte_newprot(*pte))
223			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
224		*pte = pte_mkuptodate(*pte);
225	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
226	return ret;
227}
228
229static inline int update_pmd_range(pud_t *pud, unsigned long addr,
230				   unsigned long end,
231				   struct host_vm_change *hvc)
232{
233	pmd_t *pmd;
234	unsigned long next;
235	int ret = 0;
236
237	pmd = pmd_offset(pud, addr);
238	do {
239		next = pmd_addr_end(addr, end);
240		if (!pmd_present(*pmd)) {
241			if (hvc->force || pmd_newpage(*pmd)) {
242				ret = add_munmap(addr, next - addr, hvc);
243				pmd_mkuptodate(*pmd);
244			}
245		}
246		else ret = update_pte_range(pmd, addr, next, hvc);
247	} while (pmd++, addr = next, ((addr < end) && !ret));
248	return ret;
249}
250
251static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
252				   unsigned long end,
253				   struct host_vm_change *hvc)
254{
255	pud_t *pud;
256	unsigned long next;
257	int ret = 0;
258
259	pud = pud_offset(pgd, addr);
260	do {
261		next = pud_addr_end(addr, end);
262		if (!pud_present(*pud)) {
263			if (hvc->force || pud_newpage(*pud)) {
264				ret = add_munmap(addr, next - addr, hvc);
265				pud_mkuptodate(*pud);
266			}
267		}
268		else ret = update_pmd_range(pud, addr, next, hvc);
269	} while (pud++, addr = next, ((addr < end) && !ret));
270	return ret;
271}
272
273void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
274		      unsigned long end_addr, int force)
275{
276	pgd_t *pgd;
277	struct host_vm_change hvc;
278	unsigned long addr = start_addr, next;
279	int ret = 0;
280
281	hvc = INIT_HVC(mm, force);
282	pgd = pgd_offset(mm, addr);
283	do {
284		next = pgd_addr_end(addr, end_addr);
285		if (!pgd_present(*pgd)) {
286			if (force || pgd_newpage(*pgd)) {
287				ret = add_munmap(addr, next - addr, &hvc);
288				pgd_mkuptodate(*pgd);
289			}
290		}
291		else ret = update_pud_range(pgd, addr, next, &hvc);
292	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
293
294	if (!ret)
295		ret = do_ops(&hvc, hvc.index, 1);
296
297	/* This is not an else because ret is modified above */
298	if (ret) {
299		printk(KERN_ERR "fix_range_common: failed, killing current "
300		       "process: %d\n", task_tgid_vnr(current));
301		/* We are under mmap_sem, release it such that current can terminate */
302		up_write(&current->mm->mmap_sem);
303		force_sig(SIGKILL, current);
304		do_signal(&current->thread.regs);
305	}
306}
307
308static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
309{
310	struct mm_struct *mm;
311	pgd_t *pgd;
312	pud_t *pud;
313	pmd_t *pmd;
314	pte_t *pte;
315	unsigned long addr, last;
316	int updated = 0, err;
317
318	mm = &init_mm;
319	for (addr = start; addr < end;) {
320		pgd = pgd_offset(mm, addr);
321		if (!pgd_present(*pgd)) {
322			last = ADD_ROUND(addr, PGDIR_SIZE);
323			if (last > end)
324				last = end;
325			if (pgd_newpage(*pgd)) {
326				updated = 1;
327				err = os_unmap_memory((void *) addr,
328						      last - addr);
329				if (err < 0)
330					panic("munmap failed, errno = %d\n",
331					      -err);
332			}
333			addr = last;
334			continue;
335		}
336
337		pud = pud_offset(pgd, addr);
338		if (!pud_present(*pud)) {
339			last = ADD_ROUND(addr, PUD_SIZE);
340			if (last > end)
341				last = end;
342			if (pud_newpage(*pud)) {
343				updated = 1;
344				err = os_unmap_memory((void *) addr,
345						      last - addr);
346				if (err < 0)
347					panic("munmap failed, errno = %d\n",
348					      -err);
349			}
350			addr = last;
351			continue;
352		}
353
354		pmd = pmd_offset(pud, addr);
355		if (!pmd_present(*pmd)) {
356			last = ADD_ROUND(addr, PMD_SIZE);
357			if (last > end)
358				last = end;
359			if (pmd_newpage(*pmd)) {
360				updated = 1;
361				err = os_unmap_memory((void *) addr,
362						      last - addr);
363				if (err < 0)
364					panic("munmap failed, errno = %d\n",
365					      -err);
366			}
367			addr = last;
368			continue;
369		}
370
371		pte = pte_offset_kernel(pmd, addr);
372		if (!pte_present(*pte) || pte_newpage(*pte)) {
373			updated = 1;
374			err = os_unmap_memory((void *) addr,
375					      PAGE_SIZE);
376			if (err < 0)
377				panic("munmap failed, errno = %d\n",
378				      -err);
379			if (pte_present(*pte))
380				map_memory(addr,
381					   pte_val(*pte) & PAGE_MASK,
382					   PAGE_SIZE, 1, 1, 1);
383		}
384		else if (pte_newprot(*pte)) {
385			updated = 1;
386			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
387		}
388		addr += PAGE_SIZE;
389	}
390	return updated;
391}
392
393void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
394{
395	pgd_t *pgd;
396	pud_t *pud;
397	pmd_t *pmd;
398	pte_t *pte;
399	struct mm_struct *mm = vma->vm_mm;
400	void *flush = NULL;
401	int r, w, x, prot, err = 0;
402	struct mm_id *mm_id;
403
404	address &= PAGE_MASK;
405	pgd = pgd_offset(mm, address);
406	if (!pgd_present(*pgd))
407		goto kill;
408
409	pud = pud_offset(pgd, address);
410	if (!pud_present(*pud))
411		goto kill;
412
413	pmd = pmd_offset(pud, address);
414	if (!pmd_present(*pmd))
415		goto kill;
416
417	pte = pte_offset_kernel(pmd, address);
418
419	r = pte_read(*pte);
420	w = pte_write(*pte);
421	x = pte_exec(*pte);
422	if (!pte_young(*pte)) {
423		r = 0;
424		w = 0;
425	} else if (!pte_dirty(*pte)) {
426		w = 0;
427	}
428
429	mm_id = &mm->context.id;
430	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
431		(x ? UM_PROT_EXEC : 0));
432	if (pte_newpage(*pte)) {
433		if (pte_present(*pte)) {
434			unsigned long long offset;
435			int fd;
436
437			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
438			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
439				  1, &flush);
440		}
441		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
442	}
443	else if (pte_newprot(*pte))
444		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
445
446	if (err) {
447		if (err == -ENOMEM)
448			report_enomem();
449
450		goto kill;
451	}
452
453	*pte = pte_mkuptodate(*pte);
454
455	return;
456
457kill:
458	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
459	force_sig(SIGKILL, current);
460}
461
462pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
463{
464	return pgd_offset(mm, address);
465}
466
467pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
468{
469	return pud_offset(pgd, address);
470}
471
472pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
473{
474	return pmd_offset(pud, address);
475}
476
477pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
478{
479	return pte_offset_kernel(pmd, address);
480}
481
482pte_t *addr_pte(struct task_struct *task, unsigned long addr)
483{
484	pgd_t *pgd = pgd_offset(task->mm, addr);
485	pud_t *pud = pud_offset(pgd, addr);
486	pmd_t *pmd = pmd_offset(pud, addr);
487
488	return pte_offset_map(pmd, addr);
489}
490
491void flush_tlb_all(void)
492{
493	flush_tlb_mm(current->mm);
494}
495
496void flush_tlb_kernel_range(unsigned long start, unsigned long end)
497{
498	flush_tlb_kernel_range_common(start, end);
499}
500
501void flush_tlb_kernel_vm(void)
502{
503	flush_tlb_kernel_range_common(start_vm, end_vm);
504}
505
506void __flush_tlb_one(unsigned long addr)
507{
508	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
509}
510
511static void fix_range(struct mm_struct *mm, unsigned long start_addr,
512		      unsigned long end_addr, int force)
513{
514	fix_range_common(mm, start_addr, end_addr, force);
515}
516
517void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
518		     unsigned long end)
519{
520	if (vma->vm_mm == NULL)
521		flush_tlb_kernel_range_common(start, end);
522	else fix_range(vma->vm_mm, start, end, 0);
523}
524EXPORT_SYMBOL(flush_tlb_range);
525
526void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
527			unsigned long end)
528{
529	/*
530	 * Don't bother flushing if this address space is about to be
531	 * destroyed.
532	 */
533	if (atomic_read(&mm->mm_users) == 0)
534		return;
535
536	fix_range(mm, start, end, 0);
537}
538
539void flush_tlb_mm(struct mm_struct *mm)
540{
541	struct vm_area_struct *vma = mm->mmap;
542
543	while (vma != NULL) {
544		fix_range(mm, vma->vm_start, vma->vm_end, 0);
545		vma = vma->vm_next;
546	}
547}
548
549void force_flush_all(void)
550{
551	struct mm_struct *mm = current->mm;
552	struct vm_area_struct *vma = mm->mmap;
553
554	while (vma != NULL) {
555		fix_range(mm, vma->vm_start, vma->vm_end, 1);
556		vma = vma->vm_next;
557	}
558}