/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include "linux/sched.h"
#include "um_mmu.h"

extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);

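/* Nothing to do here; these are no-ops on UML. */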
#define get_mmu_context(task) do { } while (0)
#define activate_context(tsk) do { } while (0)

#define deactivate_mm(tsk,mm) do { } while (0)

extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	arch_dup_mmap(old, new);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}

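/* Entering lazy TLB mode needs no work here. */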
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <asm/mmu.h>

extern void uml_setup_stubs(struct mm_struct *mm);
/*
 * Needed since we do not use asm-generic/mm_hooks.h:
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
}
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}

/*
 * end asm-generic/mm_hooks.h functions
 */

#define deactivate_mm(tsk,mm) do { } while (0)

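/* Flush all of the current address space's mappings out to the host. */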
extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
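	/* Stub setup modifies the address space, so take mmap_sem for write. */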
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}

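/*
 * Keep mm_cpumask() up to date and, unless we are switching to the
 * kernel-only init_mm, switch to the host address space of the new mm.
 */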
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

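/* Set up and tear down the host address space backing an mm. */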
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif