v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
  4 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  5 * Copyright (C) 2002 Andi Kleen
  6 *
  7 * This handles calls from both 32bit and 64bit mode.
  8 *
  9 * Lock order:
 10 *	context.ldt_usr_sem
 11 *	  mmap_sem
 12 *	    context.lock
 13 */
 14
 15#include <linux/errno.h>
 16#include <linux/gfp.h>
 17#include <linux/sched.h>
 18#include <linux/string.h>
 19#include <linux/mm.h>
 20#include <linux/smp.h>
 21#include <linux/syscalls.h>
 22#include <linux/slab.h>
 23#include <linux/vmalloc.h>
 24#include <linux/uaccess.h>
 25
 26#include <asm/ldt.h>
 27#include <asm/tlb.h>
 28#include <asm/desc.h>
 29#include <asm/mmu_context.h>
 30#include <asm/syscalls.h>
 31
 32static void refresh_ldt_segments(void)
 33{
 34#ifdef CONFIG_X86_64
 35	unsigned short sel;
 36
 37	/*
 38	 * Make sure that the cached DS and ES descriptors match the updated
 39	 * LDT.
 40	 */
 41	savesegment(ds, sel);
 42	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
 43		loadsegment(ds, sel);
 44
 45	savesegment(es, sel);
 46	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
 47		loadsegment(es, sel);
 48#endif
 49}
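
The SEGMENT_TI_MASK test above keys off a selector's TI bit (bit 2, mask 0x4): 0 selects the GDT, 1 the LDT. A minimal user-space sketch of the same check, reading %ds the way savesegment() does (the two constants mirror asm/segment.h):

#include <stdio.h>

#define SEGMENT_TI_MASK	0x4	/* bit 2 of a selector: 0 = GDT, 1 = LDT */
#define SEGMENT_LDT	0x4

int main(void)
{
	unsigned short sel;

	/* Read %ds, as savesegment(ds, sel) does in the kernel. */
	asm volatile("mov %%ds, %0" : "=r"(sel));

	printf("ds=%#hx: %s, index=%u, RPL=%u\n", sel,
	       (sel & SEGMENT_TI_MASK) == SEGMENT_LDT ? "LDT" : "GDT",
	       sel >> 3, sel & 3);
	return 0;
}
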
 50
 51/* context.lock is held by the task which issued the smp function call */
 52static void flush_ldt(void *__mm)
 53{
 54	struct mm_struct *mm = __mm;
 55
 56	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
 57		return;
 58
 59	load_mm_ldt(mm);
 60
 61	refresh_ldt_segments();
 62}
 63
 64/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
 65static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 66{
 67	struct ldt_struct *new_ldt;
 68	unsigned int alloc_size;
 69
 70	if (num_entries > LDT_ENTRIES)
 71		return NULL;
 72
 73	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
 74	if (!new_ldt)
 75		return NULL;
 76
 77	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
 78	alloc_size = num_entries * LDT_ENTRY_SIZE;
 79
 80	/*
 81	 * Xen is very picky: it requires a page-aligned LDT that has no
 82	 * trailing nonzero bytes in any page that contains LDT descriptors.
 83	 * Keep it simple: zero the whole allocation and never allocate less
 84	 * than PAGE_SIZE.
 85	 */
 86	if (alloc_size > PAGE_SIZE)
 87		new_ldt->entries = vzalloc(alloc_size);
 88	else
 89		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
 90
 91	if (!new_ldt->entries) {
 92		kfree(new_ldt);
 93		return NULL;
 94	}
 95
 96	/* The new LDT isn't aliased for PTI yet. */
 97	new_ldt->slot = -1;
 98
 99	new_ldt->nr_entries = num_entries;
100	return new_ldt;
101}
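
For scale: with the x86 constants LDT_ENTRY_SIZE = 8 and LDT_ENTRIES = 8192, any LDT of up to 512 entries fits in the single zeroed page, and the vzalloc() path tops out at 64 KiB. A worked check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int ldt_entry_size = 8;	/* sizeof(struct desc_struct) */
	unsigned int ldt_entries = 8192;	/* LDT_ENTRIES */

	/* Up to 512 entries take the get_zeroed_page() path... */
	assert(page_size / ldt_entry_size == 512);
	/* ...anything larger is vzalloc()ed, up to 64 KiB total. */
	assert(ldt_entries * ldt_entry_size == 65536);
	return 0;
}
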
102
103/*
104 * If PTI is enabled, this maps the LDT into the kernelmode and
105 * usermode tables for the given mm.
106 *
107 * There is no corresponding unmap function.  Even if the LDT is freed, we
108 * leave the PTEs around until the slot is reused or the mm is destroyed.
109 * This is harmless: the LDT is always in ordinary memory, and no one will
110 * access the freed slot.
111 *
112 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
113 * it useful, and the flush would slow down modify_ldt().
114 */
115static int
116map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
117{
118#ifdef CONFIG_PAGE_TABLE_ISOLATION
119	bool is_vmalloc, had_top_level_entry;
120	unsigned long va;
121	spinlock_t *ptl;
122	pgd_t *pgd;
123	int i;
124
125	if (!static_cpu_has(X86_FEATURE_PTI))
126		return 0;
127
128	/*
129	 * Any given ldt_struct should have map_ldt_struct() called at most
130	 * once.
131	 */
132	WARN_ON(ldt->slot != -1);
133
134	/*
135	 * Did we already have the top level entry allocated?  We can't
136 * use pgd_none() for this because it doesn't do anything on
137	 * 4-level page table kernels.
138	 */
139	pgd = pgd_offset(mm, LDT_BASE_ADDR);
140	had_top_level_entry = (pgd->pgd != 0);
141
142	is_vmalloc = is_vmalloc_addr(ldt->entries);
143
144	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
145		unsigned long offset = i << PAGE_SHIFT;
146		const void *src = (char *)ldt->entries + offset;
147		unsigned long pfn;
148		pgprot_t pte_prot;
149		pte_t pte, *ptep;
150
151		va = (unsigned long)ldt_slot_va(slot) + offset;
152		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
153			page_to_pfn(virt_to_page(src));
154		/*
155		 * Treat the PTI LDT range as a *userspace* range.
156		 * get_locked_pte() will allocate all needed pagetables
157		 * and account for them in this mm.
158		 */
159		ptep = get_locked_pte(mm, va, &ptl);
160		if (!ptep)
161			return -ENOMEM;
162		/*
163		 * Map it RO so the easy-to-find address is not a primary
164		 * target via some kernel interface which misses a
165		 * permission check.
166		 */
167		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
168		/* Filter out unsupported __PAGE_KERNEL* bits: */
169		pgprot_val(pte_prot) &= __supported_pte_mask;
170		pte = pfn_pte(pfn, pte_prot);
171		set_pte_at(mm, va, ptep, pte);
172		pte_unmap_unlock(ptep, ptl);
173	}
174
175	if (mm->context.ldt) {
176		/*
177		 * We already had an LDT.  The top-level entry should already
178		 * have been allocated and synchronized with the usermode
179		 * tables.
180		 */
181		WARN_ON(!had_top_level_entry);
182		if (static_cpu_has(X86_FEATURE_PTI))
183			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
184	} else {
185		/*
186		 * This is the first time we're mapping an LDT for this process.
187		 * Sync the pgd to the usermode tables.
188		 */
189		WARN_ON(had_top_level_entry);
190		if (static_cpu_has(X86_FEATURE_PTI)) {
191			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
192			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
193		}
194	}
195
196	va = (unsigned long)ldt_slot_va(slot);
197	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
198
199	ldt->slot = slot;
200#endif
201	return 0;
202}
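
For reference, the ldt_slot_va() helper used above lives in asm/mmu_context.h; on 64-bit it resolves a slot to its alias address roughly as in this sketch, with LDT_BASE_ADDR and LDT_SLOT_STRIDE carving the dedicated PTI region into two per-mm slots:

/* Sketch of the slot-to-address mapping (64-bit case). */
static inline void *ldt_slot_va_sketch(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
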
203
204static void free_ldt_pgtables(struct mm_struct *mm)
205{
206#ifdef CONFIG_PAGE_TABLE_ISOLATION
207	struct mmu_gather tlb;
208	unsigned long start = LDT_BASE_ADDR;
209	unsigned long end = start + (1UL << PGDIR_SHIFT);
210
211	if (!static_cpu_has(X86_FEATURE_PTI))
212		return;
213
214	tlb_gather_mmu(&tlb, mm, start, end);
215	free_pgd_range(&tlb, start, end, start, end);
216	tlb_finish_mmu(&tlb, start, end);
217#endif
218}
219
220/* After calling this, the LDT is immutable. */
221static void finalize_ldt_struct(struct ldt_struct *ldt)
222{
223	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
224}
225
226static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
227{
228	mutex_lock(&mm->context.lock);
229
230	/* Synchronizes with READ_ONCE in load_mm_ldt. */
231	smp_store_release(&mm->context.ldt, ldt);
232
233	/* Activate the LDT for all CPUs using current's mm. */
234	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
235
236	mutex_unlock(&mm->context.lock);
237}
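
The smp_store_release() above pairs with the READ_ONCE() on the reader side. A simplified sketch of that reader, load_mm_ldt() from asm/mmu_context.h (the PTI slot handling is omitted here):

static inline void load_mm_ldt_sketch(struct mm_struct *mm)
{
	/* Pairs with the smp_store_release() in install_ldt(). */
	struct ldt_struct *ldt = READ_ONCE(mm->context.ldt);

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
}
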
238
239static void free_ldt_struct(struct ldt_struct *ldt)
240{
241	if (likely(!ldt))
242		return;
243
244	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
245	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
246		vfree_atomic(ldt->entries);
247	else
248		free_page((unsigned long)ldt->entries);
249	kfree(ldt);
250}
251
252/*
253 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
254 * the new task is not running, so nothing can be installed.
255 */
256int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
257{
258	struct ldt_struct *new_ldt;
259	int retval = 0;
260
261	if (!old_mm)
262		return 0;
263
264	mutex_lock(&old_mm->context.lock);
265	if (!old_mm->context.ldt)
266		goto out_unlock;
267
268	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
269	if (!new_ldt) {
270		retval = -ENOMEM;
271		goto out_unlock;
272	}
273
274	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
275	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
276	finalize_ldt_struct(new_ldt);
277
278	retval = map_ldt_struct(mm, new_ldt, 0);
279	if (retval) {
280		free_ldt_pgtables(mm);
281		free_ldt_struct(new_ldt);
282		goto out_unlock;
283	}
284	mm->context.ldt = new_ldt;
285
286out_unlock:
287	mutex_unlock(&old_mm->context.lock);
288	return retval;
289}
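
Since arch_dup_mmap() calls this on every fork, a child starts with a copy of its parent's LDT. A small hypothetical user-space demonstration (error handling trimmed): the parent installs an entry with modify_ldt() func 1, and the child reads the inherited table back with func 0.

#include <asm/ldt.h>	/* struct user_desc */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>

int main(void)
{
	struct user_desc ud = {
		.entry_number = 0,
		.base_addr = 0,
		.limit = 0xfff,
		.seg_32bit = 1,
		.contents = 0,		/* data segment */
		.useable = 1,
	};
	unsigned char buf[16];

	syscall(SYS_modify_ldt, 1, &ud, sizeof(ud));	/* write entry 0 */

	if (fork() == 0) {
		/* Child: func 0 reads the LDT copied by ldt_dup_context(). */
		long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
		printf("child read %ld bytes of inherited LDT\n", n);
		_exit(0);
	}
	wait(NULL);
	return 0;
}
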
290
291/*
292 * No need to lock the MM as we are the last user
293 *
294 * 64bit: Don't touch the LDT register - we're already in the next thread.
295 */
296void destroy_context_ldt(struct mm_struct *mm)
297{
298	free_ldt_struct(mm->context.ldt);
299	mm->context.ldt = NULL;
300}
301
302void ldt_arch_exit_mmap(struct mm_struct *mm)
303{
304	free_ldt_pgtables(mm);
305}
306
307static int read_ldt(void __user *ptr, unsigned long bytecount)
308{
309	struct mm_struct *mm = current->mm;
310	unsigned long entries_size;
311	int retval;
312
313	down_read(&mm->context.ldt_usr_sem);
314
315	if (!mm->context.ldt) {
316		retval = 0;
317		goto out_unlock;
318	}
319
320	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
321		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
322
323	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
324	if (entries_size > bytecount)
325		entries_size = bytecount;
326
327	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
328		retval = -EFAULT;
329		goto out_unlock;
330	}
331
332	if (entries_size != bytecount) {
333		/* Zero-fill the rest and pretend we read bytecount bytes. */
334		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
335			retval = -EFAULT;
336			goto out_unlock;
337		}
338	}
339	retval = bytecount;
340
341out_unlock:
342	up_read(&mm->context.ldt_usr_sem);
343	return retval;
344}
345
346static int read_default_ldt(void __user *ptr, unsigned long bytecount)
347{
348	/* CHECKME: Can we use _one_ random number ? */
349#ifdef CONFIG_X86_32
350	unsigned long size = 5 * sizeof(struct desc_struct);
351#else
352	unsigned long size = 128;
353#endif
354	if (bytecount > size)
355		bytecount = size;
356	if (clear_user(ptr, bytecount))
357		return -EFAULT;
358	return bytecount;
359}
360
361static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
362{
363	struct mm_struct *mm = current->mm;
364	struct ldt_struct *new_ldt, *old_ldt;
365	unsigned int old_nr_entries, new_nr_entries;
366	struct user_desc ldt_info;
367	struct desc_struct ldt;
368	int error;
369
370	error = -EINVAL;
371	if (bytecount != sizeof(ldt_info))
372		goto out;
373	error = -EFAULT;
374	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
375		goto out;
376
377	error = -EINVAL;
378	if (ldt_info.entry_number >= LDT_ENTRIES)
379		goto out;
380	if (ldt_info.contents == 3) {
381		if (oldmode)
382			goto out;
383		if (ldt_info.seg_not_present == 0)
384			goto out;
385	}
386
387	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
388	    LDT_empty(&ldt_info)) {
389		/* The user wants to clear the entry. */
390		memset(&ldt, 0, sizeof(ldt));
391	} else {
392		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
393			error = -EINVAL;
394			goto out;
395		}
396
397		fill_ldt(&ldt, &ldt_info);
398		if (oldmode)
399			ldt.avl = 0;
400	}
401
402	if (down_write_killable(&mm->context.ldt_usr_sem))
403		return -EINTR;
404
405	old_ldt       = mm->context.ldt;
406	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
407	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
408
409	error = -ENOMEM;
410	new_ldt = alloc_ldt_struct(new_nr_entries);
411	if (!new_ldt)
412		goto out_unlock;
413
414	if (old_ldt)
415		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
416
417	new_ldt->entries[ldt_info.entry_number] = ldt;
418	finalize_ldt_struct(new_ldt);
419
420	/*
421	 * If we are using PTI, map the new LDT into the userspace pagetables.
422	 * If there is already an LDT, use the other slot so that other CPUs
423	 * will continue to use the old LDT until install_ldt() switches
424	 * them over to the new LDT.
425	 */
426	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
427	if (error) {
428		/*
429		 * This can only fail for the first LDT setup. If an LDT is
430		 * already installed then the PTE page is already
431		 * populated. Mop up a half-populated page table.
432		 */
433		if (!WARN_ON_ONCE(old_ldt))
434			free_ldt_pgtables(mm);
435		free_ldt_struct(new_ldt);
436		goto out_unlock;
437	}
438
439	install_ldt(mm, new_ldt);
440	free_ldt_struct(old_ldt);
441	error = 0;
442
443out_unlock:
444	up_write(&mm->context.ldt_usr_sem);
445out:
446	return error;
447}
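
The slot argument old_ldt ? !old_ldt->slot : 0 passed to map_ldt_struct() above alternates between the two PTI slots, so the mapping other CPUs may still be using is never overwritten in place. Traced out as a tiny self-check:

/* Hypothetical trace of write_ldt()'s PTI slot alternation. */
#include <assert.h>

static int next_slot(int have_old_ldt, int old_slot)
{
	return have_old_ldt ? !old_slot : 0;
}

int main(void)
{
	assert(next_slot(0, -1) == 0);	/* first LDT: slot 0 */
	assert(next_slot(1, 0) == 1);	/* replacing slot 0: use slot 1 */
	assert(next_slot(1, 1) == 0);	/* replacing slot 1: back to slot 0 */
	return 0;
}
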
448
449SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
450		unsigned long , bytecount)
451{
452	int ret = -ENOSYS;
453
454	switch (func) {
455	case 0:
456		ret = read_ldt(ptr, bytecount);
457		break;
458	case 1:
459		ret = write_ldt(ptr, bytecount, 1);
460		break;
461	case 2:
462		ret = read_default_ldt(ptr, bytecount);
463		break;
464	case 0x11:
465		ret = write_ldt(ptr, bytecount, 0);
466		break;
467	}
468	/*
469	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
470 * return type, but the ABI for sys_modify_ldt() expects
471	 * 'int'.  This cast gives us an int-sized value in %rax
472	 * for the return code.  The 'unsigned' is necessary so
473	 * the compiler does not try to sign-extend the negative
474	 * return codes into the high half of the register when
475	 * taking the value from int->long.
476	 */
477	return (unsigned int)ret;
478}
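
From user space the four funcs map onto modify_ldt(2). A hypothetical demo (minimal error handling) that installs a 32-bit data segment via func 1 and then loads the resulting selector, assembling index, TI = 1 (LDT) and RPL = 3 by hand:

#include <asm/ldt.h>	/* struct user_desc */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct user_desc ud = {
		.entry_number = 1,
		.base_addr = 0,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	unsigned short sel;

	if (syscall(SYS_modify_ldt, 1, &ud, sizeof(ud)) != 0) {
		perror("modify_ldt");
		return 1;
	}

	/* selector = (index << 3) | TI (LDT) | RPL 3 */
	sel = (ud.entry_number << 3) | 0x4 | 0x3;
	asm volatile("mov %0, %%fs" : : "r"(sel));
	printf("loaded LDT selector %#hx into %%fs\n", sel);
	return 0;
}
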
v3.5.6
  1/*
  2 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
  3 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  4 * Copyright (C) 2002 Andi Kleen
  5 *
  6 * This handles calls from both 32bit and 64bit mode.
  7 */
  8
  9#include <linux/errno.h>
 10#include <linux/gfp.h>
 11#include <linux/sched.h>
 12#include <linux/string.h>
 13#include <linux/mm.h>
 14#include <linux/smp.h>
 15#include <linux/vmalloc.h>
 16#include <linux/uaccess.h>
 17
 18#include <asm/ldt.h>
 19#include <asm/desc.h>
 20#include <asm/mmu_context.h>
 21#include <asm/syscalls.h>
 22
 23#ifdef CONFIG_SMP
 24static void flush_ldt(void *current_mm)
 25{
 26	if (current->active_mm == current_mm)
 27		load_LDT(&current->active_mm->context);
 28}
 29#endif
 30
 31static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 32{
 33	void *oldldt, *newldt;
 34	int oldsize;
 35
 36	if (mincount <= pc->size)
 37		return 0;
 38	oldsize = pc->size;
 39	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
 40			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
 41	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
 42		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
 43	else
 44		newldt = (void *)__get_free_page(GFP_KERNEL);
 45
 46	if (!newldt)
 47		return -ENOMEM;
 48
 49	if (oldsize)
 50		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
 51	oldldt = pc->ldt;
 52	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
 53	       (mincount - oldsize) * LDT_ENTRY_SIZE);
 54
 55	paravirt_alloc_ldt(newldt, mincount);
 56
 57#ifdef CONFIG_X86_64
 58	/* CHECKME: Do we really need this ? */
 59	wmb();
 60#endif
 61	pc->ldt = newldt;
 62	wmb();
 63	pc->size = mincount;
 64	wmb();
 65
 66	if (reload) {
 67#ifdef CONFIG_SMP
 68		preempt_disable();
 69		load_LDT(pc);
 70		if (!cpumask_equal(mm_cpumask(current->mm),
 71				   cpumask_of(smp_processor_id())))
 72			smp_call_function(flush_ldt, current->mm, 1);
 73		preempt_enable();
 74#else
 75		load_LDT(pc);
 76#endif
 77	}
 78	if (oldsize) {
 79		paravirt_free_ldt(oldldt, oldsize);
 80		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
 81			vfree(oldldt);
 82		else
 83			put_page(virt_to_page(oldldt));
 84	}
 85	return 0;
 86}
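
The mask arithmetic on lines 39-40 is the standard round-up-to-a-multiple idiom: with LDT_ENTRY_SIZE = 8 and PAGE_SIZE = 4096, mincount is rounded up to a multiple of 512 entries so the LDT always fills whole pages. Worked through:

#include <assert.h>

/* Round-up idiom from alloc_ldt(): entries per page = 4096 / 8 = 512. */
#define ENTRIES_PER_PAGE 512

static int round_up_entries(int mincount)
{
	return (mincount + (ENTRIES_PER_PAGE - 1)) & ~(ENTRIES_PER_PAGE - 1);
}

int main(void)
{
	assert(round_up_entries(1) == 512);
	assert(round_up_entries(512) == 512);
	assert(round_up_entries(513) == 1024);
	return 0;
}
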
 87
 88static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 89{
 90	int err = alloc_ldt(new, old->size, 0);
 91	int i;
 92
 93	if (err < 0)
 94		return err;
 95
 96	for (i = 0; i < old->size; i++)
 97		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
 98	return 0;
 99}
100
101/*
102 * we do not have to muck with descriptors here, that is
103 * done in switch_mm() as needed.
104 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{
107	struct mm_struct *old_mm;
108	int retval = 0;
109
110	mutex_init(&mm->context.lock);
111	mm->context.size = 0;
112	old_mm = current->mm;
113	if (old_mm && old_mm->context.size > 0) {
114		mutex_lock(&old_mm->context.lock);
115		retval = copy_ldt(&mm->context, &old_mm->context);
116		mutex_unlock(&old_mm->context.lock);
117	}
118	return retval;
119}
120
121/*
122 * No need to lock the MM as we are the last user
123 *
124 * 64bit: Don't touch the LDT register - we're already in the next thread.
125 */
126void destroy_context(struct mm_struct *mm)
127{
128	if (mm->context.size) {
129#ifdef CONFIG_X86_32
130		/* CHECKME: Can this ever happen ? */
131		if (mm == current->active_mm)
132			clear_LDT();
133#endif
134		paravirt_free_ldt(mm->context.ldt, mm->context.size);
135		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136			vfree(mm->context.ldt);
137		else
138			put_page(virt_to_page(mm->context.ldt));
139		mm->context.size = 0;
140	}
141}
142
143static int read_ldt(void __user *ptr, unsigned long bytecount)
144{
145	int err;
146	unsigned long size;
147	struct mm_struct *mm = current->mm;
148
149	if (!mm->context.size)
150		return 0;
151	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153
154	mutex_lock(&mm->context.lock);
155	size = mm->context.size * LDT_ENTRY_SIZE;
156	if (size > bytecount)
157		size = bytecount;
158
159	err = 0;
160	if (copy_to_user(ptr, mm->context.ldt, size))
161		err = -EFAULT;
162	mutex_unlock(&mm->context.lock);
163	if (err < 0)
164		goto error_return;
165	if (size != bytecount) {
166		/* zero-fill the rest */
167		if (clear_user(ptr + size, bytecount - size) != 0) {
168			err = -EFAULT;
169			goto error_return;
170		}
171	}
172	return bytecount;
173error_return:
174	return err;
175}
176
177static int read_default_ldt(void __user *ptr, unsigned long bytecount)
178{
179	/* CHECKME: Can we use _one_ random number ? */
180#ifdef CONFIG_X86_32
181	unsigned long size = 5 * sizeof(struct desc_struct);
182#else
183	unsigned long size = 128;
184#endif
185	if (bytecount > size)
186		bytecount = size;
187	if (clear_user(ptr, bytecount))
188		return -EFAULT;
189	return bytecount;
190}
191
192static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
193{
194	struct mm_struct *mm = current->mm;
195	struct desc_struct ldt;
196	int error;
197	struct user_desc ldt_info;
198
199	error = -EINVAL;
200	if (bytecount != sizeof(ldt_info))
201		goto out;
202	error = -EFAULT;
203	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
204		goto out;
205
206	error = -EINVAL;
207	if (ldt_info.entry_number >= LDT_ENTRIES)
208		goto out;
209	if (ldt_info.contents == 3) {
210		if (oldmode)
211			goto out;
212		if (ldt_info.seg_not_present == 0)
213			goto out;
214	}
215
216	mutex_lock(&mm->context.lock);
217	if (ldt_info.entry_number >= mm->context.size) {
218		error = alloc_ldt(&current->mm->context,
219				  ldt_info.entry_number + 1, 1);
220		if (error < 0)
221			goto out_unlock;
222	}
223
224	/* Allow LDTs to be cleared by the user. */
225	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226		if (oldmode || LDT_empty(&ldt_info)) {
227			memset(&ldt, 0, sizeof(ldt));
228			goto install;
229		}
230	}
231
232	fill_ldt(&ldt, &ldt_info);
233	if (oldmode)
234		ldt.avl = 0;
235
236	/* Install the new entry ...  */
237install:
238	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
239	error = 0;
240
241out_unlock:
242	mutex_unlock(&mm->context.lock);
243out:
244	return error;
245}
246
247asmlinkage int sys_modify_ldt(int func, void __user *ptr,
248			      unsigned long bytecount)
249{
250	int ret = -ENOSYS;
251
252	switch (func) {
253	case 0:
254		ret = read_ldt(ptr, bytecount);
255		break;
256	case 1:
257		ret = write_ldt(ptr, bytecount, 1);
258		break;
259	case 2:
260		ret = read_default_ldt(ptr, bytecount);
261		break;
262	case 0x11:
263		ret = write_ldt(ptr, bytecount, 0);
264		break;
265	}
266	return ret;
267}