/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

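/*
 * Walk the page tables of "mm" down to the PTE for the process virtual
 * address "proc", allocating intermediate levels as needed, and point
 * that PTE, read-only, at the kernel page at "kernel".  This is how the
 * syscall stub pages get placed in each process address space.
 */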
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

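	/* Map the kernel page at the stub address, present and readable. */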
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}

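/*
 * Set up the kernel side of a new address space: allocate the stub
 * stack page and create the host process backing the mm, either by
 * copying the current context or by starting a fresh userspace process.
 */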
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

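	/* This zeroed page is used as the stub's stack and data area. */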
	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

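	/*
	 * Block UML's own signal handling while the host child process
	 * is created.
	 */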
	block_signals();
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else
		to_mm->id.u.pid = start_userspace(stack);
	unblock_signals();

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

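/*
 * Map the syscall stub code page at STUB_CODE and the stub stack/data
 * page at STUB_DATA, then register the region as a special mapping.
 * A process whose stubs can't be set up has no way to run syscalls, so
 * failure here is fatal for it.
 */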
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

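	/* Record the stub pages for install_special_mapping() below. */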
	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

out:
	force_sigsegv(SIGSEGV, current);
}

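/*
 * Clear the stub PTEs on address space teardown; the pages themselves
 * are owned elsewhere and are not freed here.
 */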
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}

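/*
 * Kill the host process backing this address space and release the stub
 * stack page and the LDT.
 */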
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be
	 * zero, resulting in a kill(0), which will result in the
	 * whole UML suddenly dying.  Also, cover negative and
	 * 1 cases, since they shouldn't happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}