/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
	mm_context_t *pc;

	if (current->active_mm != current_mm)
		return;

	pc = &current->active_mm->context;
	set_ldt(pc->ldt->entries, pc->ldt->size);
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int size)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (size > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = size * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	new_ldt->size = size;
	return new_ldt;
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->size);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}

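/* Free an LDT previously allocated by alloc_ldt_struct(), if any. */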
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->size);
	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	old_mm = current->mm;
	if (!old_mm) {
		mm->context.ldt = NULL;
		return 0;
	}

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt) {
		mm->context.ldt = NULL;
		goto out_unlock;
	}

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->size * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

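/* modify_ldt() func 0: copy the current LDT out to userspace, zero-padded to bytecount. */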
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int retval;
	unsigned long size;
	struct mm_struct *mm = current->mm;

	mutex_lock(&mm->context.lock);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + size, bytecount - size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	mutex_unlock(&mm->context.lock);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

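/*
 * modify_ldt() funcs 1 and 0x11: install one LDT entry. oldmode (func 1)
 * is the legacy interface; it rejects contents == 3 entries and clears
 * the AVL bit.
 */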
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int oldsize, newsize;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	mutex_lock(&mm->context.lock);

	old_ldt = mm->context.ldt;
	oldsize = old_ldt ? old_ldt->size : 0;
	newsize = max(ldt_info.entry_number + 1, oldsize);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(newsize);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}

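/*
 * func: 0 = read the current LDT, 1 = write an entry (legacy mode),
 * 2 = read the default LDT, 0x11 = write an entry (current mode).
 */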
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
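
/*
 * Minimal userspace sketch of driving this syscall (illustrative only, not
 * part of this file). glibc provides no modify_ldt() wrapper, so the call
 * goes through syscall(2); struct user_desc and the LDT_* constants come
 * from the uapi <asm/ldt.h>. The sketch installs a 32-bit flat data segment
 * as LDT entry 0 with func 0x11, then reads the table back with func 0,
 * which zero-pads the result out to bytecount.
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct user_desc desc;
 *		unsigned char buf[16 * LDT_ENTRY_SIZE];
 *		long nread;
 *
 *		memset(&desc, 0, sizeof(desc));
 *		desc.entry_number = 0;
 *		desc.limit = 0xfffff;
 *		desc.seg_32bit = 1;
 *		desc.limit_in_pages = 1;
 *		desc.useable = 1;
 *
 *		if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0) {
 *			perror("modify_ldt");
 *			return 1;
 *		}
 *
 *		nread = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 *		printf("read %ld bytes of LDT\n", nread);
 *		return 0;
 *	}
 */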
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
{
	if (current->active_mm == current_mm)
		load_LDT(&current->active_mm->context);
}
#endif

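/*
 * Grow the LDT to at least mincount entries, rounded up to a full page
 * of descriptors; if reload is set, make the running CPUs pick it up.
 */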
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);

	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	if (oldsize) {
		paravirt_free_ldt(oldldt, oldsize);
		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			put_page(virt_to_page(oldldt));
	}
	return 0;
}

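/* Copy the old mm's LDT into a newly created mm (fork path). */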
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	int i;

	if (err < 0)
		return err;

	for (i = 0; i < old->size; i++)
		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
	return 0;
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	mm->context.size = 0;
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		mutex_lock(&old_mm->context.lock);
		retval = copy_ldt(&mm->context, &old_mm->context);
		mutex_unlock(&old_mm->context.lock);
	}
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.size) {
#ifdef CONFIG_X86_32
		/* CHECKME: Can this ever happen ? */
		if (mm == current->active_mm)
			clear_LDT();
#endif
		paravirt_free_ldt(mm->context.ldt, mm->context.size);
		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(mm->context.ldt);
		else
			put_page(virt_to_page(mm->context.ldt));
		mm->context.size = 0;
	}
}

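/* modify_ldt() func 0: copy the current LDT to userspace, zero-padded to bytecount. */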
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	struct mm_struct *mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	mutex_lock(&mm->context.lock);
	size = mm->context.size * LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	if (copy_to_user(ptr, mm->context.ldt, size))
		err = -EFAULT;
	mutex_unlock(&mm->context.lock);
	if (err < 0)
		goto error_return;
	if (size != bytecount) {
		/* zero-fill the rest */
		if (clear_user(ptr + size, bytecount - size) != 0) {
			err = -EFAULT;
			goto error_return;
		}
	}
	return bytecount;
error_return:
	return err;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

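/*
 * modify_ldt() funcs 1 and 0x11: validate the user_desc, grow the LDT
 * if needed, and write the entry in place.
 */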
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct desc_struct ldt;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&mm->context.lock);
	if (ldt_info.entry_number >= mm->context.size) {
		error = alloc_ldt(&current->mm->context,
				  ldt_info.entry_number + 1, 1);
		if (error < 0)
			goto out_unlock;
	}

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			memset(&ldt, 0, sizeof(ldt));
			goto install;
		}
	}

	fill_ldt(&ldt, &ldt_info);
	if (oldmode)
		ldt.avl = 0;

	/* Install the new entry ... */
install:
	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}

asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}