/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

#include <asm/mmu.h>

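/*
 * uml_setup_stubs() maps UML's stub code and data pages into @mm's address
 * space; UML relies on these stubs to carry out operations from inside the
 * process's own host address space.
 */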
extern void uml_setup_stubs(struct mm_struct *mm);
/*
 * Needed since we do not use the asm-generic/mm_hooks.h:
 */
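/*
 * Called when an mm is duplicated on fork(); the child needs its own copy
 * of the stub pages mapped before it can run.
 */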
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
	return 0;
}
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

/*
 * end asm-generic/mm_hooks.h functions
 */

#define deactivate_mm(tsk,mm)	do { } while (0)

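/*
 * Force a full re-sync of current->mm's mappings with the underlying host
 * address space, flushing any stale state.
 */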
extern void force_flush_all(void);

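/*
 * activate_mm() switches to the new address space and maps the stubs into
 * it; uml_setup_stubs() adds VMAs, so the new mm's mmap lock is taken for
 * writing (nested, because the caller may already hold another mm's lock).
 */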
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
	uml_setup_stubs(new);
	mmap_write_unlock(new);
}

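/*
 * On a context switch, point the CPU at the host address space backing
 * @next.  The kernel's own init_mm has no host userspace counterpart, so
 * no switch is needed for it.
 */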
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}

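/*
 * Nothing to do when entering lazy TLB mode on UML.
 */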
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

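/*
 * init_new_context() sets up the host-side address space for a new mm;
 * destroy_context() releases it again when the mm goes away.
 */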
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif /* __UM_MMU_CONTEXT_H */