// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	bool is_vmalloc, had_top_level_entry;
	unsigned long va;
	spinlock_t *ptl;
	pgd_t *pgd;
	int i;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/*
	 * Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);
	had_top_level_entry = (pgd->pgd != 0);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so that this easy-to-find address is not a
		 * primary target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI)) {
			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
		}
	}

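	/* Make sure no CPU keeps a stale translation for the slot's VA range. */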
	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);

	ldt->slot = slot;
#endif
	return 0;
}

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = start + (1UL << PGDIR_SHIFT);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
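		/* The legacy interface has no 'useable' (AVL) bit; force it clear. */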
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int, func, void __user *, ptr,
		unsigned long, bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
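
/*
 * Illustrative userspace usage, a minimal sketch rather than part of this
 * file: install one 32-bit flat data segment with the "new mode" write
 * (func 0x11) and read the whole table back (func 0).  glibc provides no
 * modify_ldt() wrapper, so the calls go through syscall(2):
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc desc = {
 *		.entry_number	= 0,
 *		.base_addr	= 0,
 *		.limit		= 0xfffff,
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *	};
 *	unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
 *
 *	syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *	syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 */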
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
{
	if (current->active_mm == current_mm)
		load_LDT(&current->active_mm->context);
}
#endif

static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
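	/* Round the request up to a whole page's worth of LDT entries. */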
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);

	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
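	/*
	 * Publish the new table pointer before the new size so that a
	 * lockless reader never sees a size larger than the table it
	 * just loaded.
	 */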
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	if (oldsize) {
		paravirt_free_ldt(oldldt, oldsize);
		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			put_page(virt_to_page(oldldt));
	}
	return 0;
}

static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	int i;

	if (err < 0)
		return err;

	for (i = 0; i < old->size; i++)
		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
	return 0;
}

/*
 * We do not have to muck with descriptors here; that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	mm->context.size = 0;
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		mutex_lock(&old_mm->context.lock);
		retval = copy_ldt(&mm->context, &old_mm->context);
		mutex_unlock(&old_mm->context.lock);
	}
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.size) {
#ifdef CONFIG_X86_32
		/* CHECKME: Can this ever happen ? */
		if (mm == current->active_mm)
			clear_LDT();
#endif
		paravirt_free_ldt(mm->context.ldt, mm->context.size);
		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(mm->context.ldt);
		else
			put_page(virt_to_page(mm->context.ldt));
		mm->context.size = 0;
	}
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	struct mm_struct *mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	mutex_lock(&mm->context.lock);
	size = mm->context.size * LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	if (copy_to_user(ptr, mm->context.ldt, size))
		err = -EFAULT;
	mutex_unlock(&mm->context.lock);
	if (err < 0)
		goto error_return;
	if (size != bytecount) {
		/* zero-fill the rest */
		if (clear_user(ptr + size, bytecount - size) != 0) {
			err = -EFAULT;
			goto error_return;
		}
	}
	return bytecount;
error_return:
	return err;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct desc_struct ldt;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&mm->context.lock);
	if (ldt_info.entry_number >= mm->context.size) {
		error = alloc_ldt(&current->mm->context,
				  ldt_info.entry_number + 1, 1);
		if (error < 0)
			goto out_unlock;
	}

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			memset(&ldt, 0, sizeof(ldt));
			goto install;
		}
	}

	fill_ldt(&ldt, &ldt_info);
	if (oldmode)
		ldt.avl = 0;

	/* Install the new entry ... */
install:
	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}

asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}