v3.5.6
 
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"

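/*
 * Host address-space changes are queued as host_vm_ops and replayed by
 * do_ops().  Note that ops[] holds a single entry here, so each queued
 * op is flushed as soon as the next one cannot be merged into it.
 */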
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })

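/*
 * Replay the first "end" queued ops against the host process.
 * "finished" is passed through to the lower layers to signal that this
 * is the last batch of the current update.
 */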
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	return ret;
}

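/*
 * Queue an mmap of the physical memory backing "virt".  If it simply
 * extends the previously queued MMAP op (contiguous in both address and
 * file offset, same protection and fd), the two are merged into one
 * larger op instead; add_munmap() and add_mprotect() below merge in the
 * same way.
 */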
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}

static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}

static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

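/*
 * Advance "n" to the next "inc"-aligned boundary ("inc" must be a power
 * of two); an already-aligned "n" still moves a full "inc" forward.
 */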
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

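/*
 * Walk the PTEs in [addr, end) and queue host ops for entries that need
 * it: new pages are mapped (or unmapped if no longer present), and
 * protection changes are applied.  Clearing read/write permission for
 * non-young/non-dirty pages makes the next access fault, which is how
 * the accessed and dirty bits get set.  The stub pages are skipped.
 */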
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

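/*
 * Bring the host mappings for [start_addr, end_addr) back in sync with
 * the page tables.  With "force", the whole range is redone regardless
 * of what is marked as changed.  On failure the current process is
 * killed, as its host address space is no longer trustworthy.
 */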
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

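/*
 * Kernel-space counterpart of fix_range_common(): walk the init_mm page
 * tables and apply changes directly with os_unmap_memory() and
 * map_memory(), panicking on failure since these are kernel mappings.
 * Returns nonzero if any mapping was changed.
 */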
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

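/*
 * Sync a single page of a user address space with the host.  Nothing is
 * queued here: the map/unmap/protect call is issued immediately, with
 * "finished" set.
 */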
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

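/*
 * A vma without an mm is taken to be a kernel vma here, which goes
 * through the direct kernel-range path; everything else goes through
 * fix_range().
 */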
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

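/*
 * The page-table walkers below are shared between user and kernel
 * address spaces; struct vm_ops supplies the mmap/unmap operations
 * appropriate for each: direct host syscalls for init_mm, the skas
 * map()/unmap() helpers for everything else.
 */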
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
};

static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why is executable needed to be always set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

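/*
 * Walk the PTEs in [addr, end) and push every out-of-sync entry to the
 * host: present pages are (re)mapped with protection derived from the
 * read/write/exec, young and dirty bits, everything else is unmapped.
 */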
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_needsync(*pte))
			continue;

		if (pte_present(*pte)) {
			__u64 offset;
			unsigned long phys = pte_val(*pte) & PAGE_MASK;
			int fd = phys_mapping(phys, &offset);
			int r, w, x, prot;

			r = pte_read(*pte);
			w = pte_write(*pte);
			x = pte_exec(*pte);
			if (!pte_young(*pte)) {
				r = 0;
				w = 0;
			} else if (!pte_dirty(*pte))
				w = 0;

			prot = (r ? UM_PROT_READ : 0) |
			       (w ? UM_PROT_WRITE : 0) |
			       (x ? UM_PROT_EXEC : 0);

			ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
					prot, fd, offset);
		} else
			ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);

		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_needsync(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_needsync(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_needsync(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

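/*
 * Replay all page-table changes recorded in
 * mm->context.sync_tlb_range_{from,to} against the host address space,
 * then reset the recorded range.
 */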
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_needsync(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

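/*
 * flush_tlb_mm() only marks each vma's range as needing a sync; the
 * host update itself is deferred until um_tlb_sync() runs.
 */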
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}