v5.4 (arch/x86/kernel/vm86_32.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Copyright (C) 1994  Linus Torvalds
  4 *
  5 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
  6 *                stack - Manfred Spraul <manfred@colorfullife.com>
  7 *
  8 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
  9 *                them correctly. Now the emulation will be in a
 10 *                consistent state after stackfaults - Kasper Dupont
 11 *                <kasperd@daimi.au.dk>
 12 *
 13 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 14 *                <kasperd@daimi.au.dk>
 15 *
 16 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 17 *                caused by Kasper Dupont's changes - Stas Sergeev
 18 *
 19 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 20 *                Kasper Dupont <kasperd@daimi.au.dk>
 21 *
 22 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 23 *                Kasper Dupont <kasperd@daimi.au.dk>
 24 *
 25 *   9 apr 2002 - Changed stack access macros to jump to a label
 26 *                instead of returning to userspace. This simplifies
 27 *                do_int, and is needed by handle_vm86_fault. Kasper
 28 *                Dupont <kasperd@daimi.au.dk>
 29 *
 30 */
 31
 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 33
 34#include <linux/capability.h>
 35#include <linux/errno.h>
 36#include <linux/interrupt.h>
 37#include <linux/syscalls.h>
 38#include <linux/sched.h>
 39#include <linux/sched/task_stack.h>
 40#include <linux/kernel.h>
 41#include <linux/signal.h>
 42#include <linux/string.h>
 43#include <linux/mm.h>
 44#include <linux/smp.h>
 45#include <linux/highmem.h>
 46#include <linux/ptrace.h>
 47#include <linux/audit.h>
 48#include <linux/stddef.h>
 49#include <linux/slab.h>
 50#include <linux/security.h>
 51
 52#include <linux/uaccess.h>
 53#include <asm/io.h>
 54#include <asm/tlbflush.h>
 55#include <asm/irq.h>
 56#include <asm/traps.h>
 57#include <asm/vm86.h>
 58#include <asm/switch_to.h>
 59
 60/*
 61 * Known problems:
 62 *
 63 * Interrupt handling is not guaranteed:
 64 * - a real x86 will disable all interrupts for one instruction
 65 *   after a "mov ss,xx" to make stack handling atomic even without
 66 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 67 *   as the next instruction might result in a page fault or similar.
 68 * - a real x86 will have interrupts disabled for one instruction
 69 *   past the 'sti' that enables them. We don't bother with all the
 70 *   details yet.
 71 *
 72 * Let's hope these problems do not actually matter for anything.
 73 */
 74
 75
 76/*
 77 * 8- and 16-bit register defines..
 78 */
 79#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
 80#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
 81#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
 82#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
 83
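The AL()/AH() defines above lean on x86 being little-endian: byte 0 of the saved 32-bit ax image is the guest's AL and byte 1 is AH. A minimal standalone sketch of the same aliasing, using a made-up fake_pt_regs in place of the kernel's pt_regs:

#include <stdio.h>

struct fake_pt_regs { unsigned long ax; };	/* stand-in for pt_regs, illustration only */

#define AL(r)	(((unsigned char *)&((r)->ax))[0])
#define AH(r)	(((unsigned char *)&((r)->ax))[1])

int main(void)
{
	struct fake_pt_regs regs = { .ax = 0x12345678 };

	/* On little-endian x86: byte 0 is AL (0x78), byte 1 is AH (0x56). */
	printf("AL=%02x AH=%02x\n", AL(&regs), AH(&regs));
	return 0;
}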
 84/*
 85 * virtual flags (16 and 32-bit versions)
 86 */
 87#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
 88#define VEFLAGS	(current->thread.vm86->veflags)
 89
 90#define set_flags(X, new, mask) \
 91((X) = ((X) & ~(mask)) | ((new) & (mask)))
 92
 93#define SAFE_MASK	(0xDD5)
 94#define RETURN_MASK	(0xDFF)
 95
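SAFE_MASK selects the EFLAGS bits a vm86 guest may change (the arithmetic flags plus TF, DF and OF), and set_flags() splices guest-supplied bits into a value whose remaining bits stay kernel-controlled. A standalone sketch of the same merge, with made-up flag values:

#include <stdio.h>

#define SAFE_MASK	(0xDD5)		/* CF, PF, AF, ZF, SF, TF, DF, OF */

static unsigned long set_flags(unsigned long x, unsigned long new_flags,
			       unsigned long mask)
{
	return (x & ~mask) | (new_flags & mask);
}

int main(void)
{
	unsigned long kernel_flags = 0x0202;	/* IF set, as protected mode left it */
	unsigned long guest_flags  = 0x0cd5;	/* the guest tries to flip everything */

	/* Bits inside the mask come from the guest; IF, IOPL etc. are preserved. */
	printf("merged: 0x%04lx\n", set_flags(kernel_flags, guest_flags, SAFE_MASK));
	return 0;
}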
 96void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 97{
 98	struct task_struct *tsk = current;
 99	struct vm86plus_struct __user *user;
100	struct vm86 *vm86 = current->thread.vm86;
101	long err = 0;
102
103	/*
104	 * This gets called from entry.S with interrupts disabled, but
105	 * from process context. Enable interrupts here, before trying
106	 * to access user space.
107	 */
108	local_irq_enable();
109
110	if (!vm86 || !vm86->user_vm86) {
111		pr_alert("no user_vm86: BAD\n");
112		do_exit(SIGSEGV);
113	}
114	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
115	user = vm86->user_vm86;
116
117	if (!access_ok(user, vm86->vm86plus.is_vm86pus ?
118		       sizeof(struct vm86plus_struct) :
119		       sizeof(struct vm86_struct))) {
120		pr_alert("could not access userspace vm86 info\n");
121		do_exit(SIGSEGV);
122	}
123
124	put_user_try {
125		put_user_ex(regs->pt.bx, &user->regs.ebx);
126		put_user_ex(regs->pt.cx, &user->regs.ecx);
127		put_user_ex(regs->pt.dx, &user->regs.edx);
128		put_user_ex(regs->pt.si, &user->regs.esi);
129		put_user_ex(regs->pt.di, &user->regs.edi);
130		put_user_ex(regs->pt.bp, &user->regs.ebp);
131		put_user_ex(regs->pt.ax, &user->regs.eax);
132		put_user_ex(regs->pt.ip, &user->regs.eip);
133		put_user_ex(regs->pt.cs, &user->regs.cs);
134		put_user_ex(regs->pt.flags, &user->regs.eflags);
135		put_user_ex(regs->pt.sp, &user->regs.esp);
136		put_user_ex(regs->pt.ss, &user->regs.ss);
137		put_user_ex(regs->es, &user->regs.es);
138		put_user_ex(regs->ds, &user->regs.ds);
139		put_user_ex(regs->fs, &user->regs.fs);
140		put_user_ex(regs->gs, &user->regs.gs);
141
142		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
143	} put_user_catch(err);
144	if (err) {
145		pr_alert("could not access userspace vm86 info\n");
146		do_exit(SIGSEGV);
147	}
148
149	preempt_disable();
150	tsk->thread.sp0 = vm86->saved_sp0;
151	tsk->thread.sysenter_cs = __KERNEL_CS;
152	update_task_stack(tsk);
153	refresh_sysenter_cs(&tsk->thread);
154	vm86->saved_sp0 = 0;
155	preempt_enable();
156
157	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
158
159	lazy_load_gs(vm86->regs32.gs);
160
161	regs->pt.ax = retval;
162}
163
164static void mark_screen_rdonly(struct mm_struct *mm)
165{
166	struct vm_area_struct *vma;
167	spinlock_t *ptl;
168	pgd_t *pgd;
169	p4d_t *p4d;
170	pud_t *pud;
171	pmd_t *pmd;
172	pte_t *pte;
173	int i;
174
175	down_write(&mm->mmap_sem);
176	pgd = pgd_offset(mm, 0xA0000);
177	if (pgd_none_or_clear_bad(pgd))
178		goto out;
179	p4d = p4d_offset(pgd, 0xA0000);
180	if (p4d_none_or_clear_bad(p4d))
181		goto out;
182	pud = pud_offset(p4d, 0xA0000);
183	if (pud_none_or_clear_bad(pud))
184		goto out;
185	pmd = pmd_offset(pud, 0xA0000);
186
187	if (pmd_trans_huge(*pmd)) {
188		vma = find_vma(mm, 0xA0000);
189		split_huge_pmd(vma, pmd, 0xA0000);
190	}
191	if (pmd_none_or_clear_bad(pmd))
192		goto out;
193	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
194	for (i = 0; i < 32; i++) {
195		if (pte_present(*pte))
196			set_pte(pte, pte_wrprotect(*pte));
197		pte++;
198	}
199	pte_unmap_unlock(pte, ptl);
200out:
201	up_write(&mm->mmap_sem);
202	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
203}
204
205
206
207static int do_vm86_irq_handling(int subfunction, int irqnumber);
208static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
209
210SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
211{
212	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
213}
214
215
216SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
217{
218	switch (cmd) {
219	case VM86_REQUEST_IRQ:
220	case VM86_FREE_IRQ:
221	case VM86_GET_IRQ_BITS:
222	case VM86_GET_AND_RESET_IRQ:
223		return do_vm86_irq_handling(cmd, (int)arg);
224	case VM86_PLUS_INSTALL_CHECK:
225		/*
226		 * NOTE: on old vm86 stuff this will return the error
227		 *  from access_ok(), because the subfunction is
228 *  interpreted as an (invalid) address of a vm86_struct.
229		 *  So the installation check works.
230		 */
231		return 0;
232	}
233
234	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
235	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
236}
237
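The VM86_PLUS_INSTALL_CHECK quirk described in the comment can be probed from userspace. A hedged sketch, assuming a 32-bit x86 build where glibc exposes SYS_vm86 and the uapi header <asm/vm86.h> is available:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/vm86.h>		/* VM86_PLUS_INSTALL_CHECK */

int main(void)
{
	/* 0 means the vm86plus entry point recognized the subfunction; the old
	 * vm86 path would instead treat it as a bad pointer and return an error. */
	long ret = syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0);

	printf("vm86plus %savailable\n", ret == 0 ? "" : "not ");
	return 0;
}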
238
239static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
240{
241	struct task_struct *tsk = current;
242	struct vm86 *vm86 = tsk->thread.vm86;
243	struct kernel_vm86_regs vm86regs;
244	struct pt_regs *regs = current_pt_regs();
245	unsigned long err = 0;
246
247	err = security_mmap_addr(0);
248	if (err) {
249		/*
250		 * vm86 cannot virtualize the address space, so vm86 users
251		 * need to manage the low 1MB themselves using mmap.  Given
252		 * that BIOS places important data in the first page, vm86
253		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
254		 * for example, won't even bother trying to use vm86 if it
255		 * can't map a page at virtual address 0.
256		 *
257		 * To reduce the available kernel attack surface, simply
258		 * disallow vm86(old) for users who cannot mmap at va 0.
259		 *
260		 * The implementation of security_mmap_addr will allow
261		 * suitably privileged users to map va 0 even if
262		 * vm.mmap_min_addr is set above 0, and we want this
263		 * behavior for vm86 as well, as it ensures that legacy
264		 * tools like vbetool will not fail just because of
265		 * vm.mmap_min_addr.
266		 */
267		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
268			     current->comm, task_pid_nr(current),
269			     from_kuid_munged(&init_user_ns, current_uid()));
270		return -EPERM;
271	}
272
273	if (!vm86) {
274		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
275			return -ENOMEM;
276		tsk->thread.vm86 = vm86;
277	}
278	if (vm86->saved_sp0)
279		return -EPERM;
280
281	if (!access_ok(user_vm86, plus ?
282		       sizeof(struct vm86_struct) :
283		       sizeof(struct vm86plus_struct)))
284		return -EFAULT;
285
286	memset(&vm86regs, 0, sizeof(vm86regs));
287	get_user_try {
288		unsigned short seg;
289		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
290		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
291		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
292		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
293		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
294		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
295		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
296		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
297		get_user_ex(seg, &user_vm86->regs.cs);
298		vm86regs.pt.cs = seg;
299		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
300		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
301		get_user_ex(seg, &user_vm86->regs.ss);
302		vm86regs.pt.ss = seg;
303		get_user_ex(vm86regs.es, &user_vm86->regs.es);
304		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
305		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
306		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
307
308		get_user_ex(vm86->flags, &user_vm86->flags);
309		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
310		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
311	} get_user_catch(err);
312	if (err)
313		return err;
314
315	if (copy_from_user(&vm86->int_revectored,
316			   &user_vm86->int_revectored,
317			   sizeof(struct revectored_struct)))
318		return -EFAULT;
319	if (copy_from_user(&vm86->int21_revectored,
320			   &user_vm86->int21_revectored,
321			   sizeof(struct revectored_struct)))
322		return -EFAULT;
323	if (plus) {
324		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
325				   sizeof(struct vm86plus_info_struct)))
326			return -EFAULT;
327		vm86->vm86plus.is_vm86pus = 1;
328	} else
329		memset(&vm86->vm86plus, 0,
330		       sizeof(struct vm86plus_info_struct));
331
332	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
333	vm86->user_vm86 = user_vm86;
334
335/*
336 * The flags register is also special: we cannot trust that the user
337 * has set it up safely, so this makes sure interrupt etc flags are
338 * inherited from protected mode.
339 */
340	VEFLAGS = vm86regs.pt.flags;
341	vm86regs.pt.flags &= SAFE_MASK;
342	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
343	vm86regs.pt.flags |= X86_VM_MASK;
344
345	vm86regs.pt.orig_ax = regs->orig_ax;
346
347	switch (vm86->cpu_type) {
348	case CPU_286:
349		vm86->veflags_mask = 0;
350		break;
351	case CPU_386:
352		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
353		break;
354	case CPU_486:
355		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
356		break;
357	default:
358		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
359		break;
360	}
361
362/*
363 * Save old state
364 */
365	vm86->saved_sp0 = tsk->thread.sp0;
366	lazy_save_gs(vm86->regs32.gs);
367
368	/* make room for real-mode segments */
369	preempt_disable();
370	tsk->thread.sp0 += 16;
371
372	if (boot_cpu_has(X86_FEATURE_SEP)) {
373		tsk->thread.sysenter_cs = 0;
374		refresh_sysenter_cs(&tsk->thread);
375	}
376
377	update_task_stack(tsk);
378	preempt_enable();
379
380	if (vm86->flags & VM86_SCREEN_BITMAP)
381		mark_screen_rdonly(tsk->mm);
382
383	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
384	force_iret();
385	return regs->ax;
386}
387
388static inline void set_IF(struct kernel_vm86_regs *regs)
389{
390	VEFLAGS |= X86_EFLAGS_VIF;
391}
392
393static inline void clear_IF(struct kernel_vm86_regs *regs)
394{
395	VEFLAGS &= ~X86_EFLAGS_VIF;
396}
397
398static inline void clear_TF(struct kernel_vm86_regs *regs)
399{
400	regs->pt.flags &= ~X86_EFLAGS_TF;
401}
402
403static inline void clear_AC(struct kernel_vm86_regs *regs)
404{
405	regs->pt.flags &= ~X86_EFLAGS_AC;
406}
407
408/*
409 * It is correct to call set_IF(regs) from the set_vflags_*
410 * functions. However someone forgot to call clear_IF(regs)
411 * in the opposite case.
412 * After the command sequence CLI PUSHF STI POPF you should
413 * end up with interrupts disabled, but you ended up with
414 * interrupts enabled.
415 *  ( I was testing my own changes, but the only bug I
416 *    could find was in a function I had not changed. )
417 * [KD]
418 */
419
420static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
421{
422	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
423	set_flags(regs->pt.flags, flags, SAFE_MASK);
424	if (flags & X86_EFLAGS_IF)
425		set_IF(regs);
426	else
427		clear_IF(regs);
428}
429
430static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
431{
432	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
433	set_flags(regs->pt.flags, flags, SAFE_MASK);
434	if (flags & X86_EFLAGS_IF)
435		set_IF(regs);
436	else
437		clear_IF(regs);
438}
439
440static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
441{
442	unsigned long flags = regs->pt.flags & RETURN_MASK;
443
444	if (VEFLAGS & X86_EFLAGS_VIF)
445		flags |= X86_EFLAGS_IF;
446	flags |= X86_EFLAGS_IOPL;
447	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
448}
449
450static inline int is_revectored(int nr, struct revectored_struct *bitmap)
451{
452	return test_bit(nr, bitmap->__map);
453}
454
455#define val_byte(val, n) (((__u8 *)&val)[n])
456
457#define pushb(base, ptr, val, err_label) \
458	do { \
459		__u8 __val = val; \
460		ptr--; \
461		if (put_user(__val, base + ptr) < 0) \
462			goto err_label; \
463	} while (0)
464
465#define pushw(base, ptr, val, err_label) \
466	do { \
467		__u16 __val = val; \
468		ptr--; \
469		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
470			goto err_label; \
471		ptr--; \
472		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
473			goto err_label; \
474	} while (0)
475
476#define pushl(base, ptr, val, err_label) \
477	do { \
478		__u32 __val = val; \
479		ptr--; \
480		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
481			goto err_label; \
482		ptr--; \
483		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
484			goto err_label; \
485		ptr--; \
486		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
487			goto err_label; \
488		ptr--; \
489		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
490			goto err_label; \
491	} while (0)
492
493#define popb(base, ptr, err_label) \
494	({ \
495		__u8 __res; \
496		if (get_user(__res, base + ptr) < 0) \
497			goto err_label; \
498		ptr++; \
499		__res; \
500	})
501
502#define popw(base, ptr, err_label) \
503	({ \
504		__u16 __res; \
505		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
506			goto err_label; \
507		ptr++; \
508		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
509			goto err_label; \
510		ptr++; \
511		__res; \
512	})
513
514#define popl(base, ptr, err_label) \
515	({ \
516		__u32 __res; \
517		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
518			goto err_label; \
519		ptr++; \
520		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
521			goto err_label; \
522		ptr++; \
523		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
524			goto err_label; \
525		ptr++; \
526		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
527			goto err_label; \
528		ptr++; \
529		__res; \
530	})
531
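The push/pop macros above move one byte at a time so that a fault on any single byte can jump to the caller's error label, and the order of the byte stores leaves a little-endian word on the emulated stack, as a real-mode CPU would. A userspace sketch of the 16-bit push (pushw_demo is a hypothetical stand-in that writes plain memory instead of using put_user()):

#include <stdint.h>
#include <stdio.h>

static void pushw_demo(uint8_t *base, uint16_t *ptr, uint16_t val)
{
	(*ptr)--;
	base[*ptr] = val >> 8;		/* high byte lands at the higher address */
	(*ptr)--;
	base[*ptr] = val & 0xff;	/* low byte lands at the lower address */
}

int main(void)
{
	uint8_t stack[16] = { 0 };
	uint16_t sp = sizeof(stack);

	pushw_demo(stack, &sp, 0x1234);
	/* Little-endian word on the "stack": bytes 34 12, and sp dropped by 2. */
	printf("sp=%u bytes=%02x %02x\n", sp, stack[sp], stack[sp + 1]);
	return 0;
}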
532/* There are so many possible reasons for this function to return
533 * VM86_INTx, so adding another doesn't bother me. We can expect
534 * userspace programs to be able to handle it. (Getting a problem
535 * in userspace is always better than an Oops anyway.) [KD]
536 */
537static void do_int(struct kernel_vm86_regs *regs, int i,
538    unsigned char __user *ssp, unsigned short sp)
539{
540	unsigned long __user *intr_ptr;
541	unsigned long segoffs;
542	struct vm86 *vm86 = current->thread.vm86;
543
544	if (regs->pt.cs == BIOSSEG)
545		goto cannot_handle;
546	if (is_revectored(i, &vm86->int_revectored))
547		goto cannot_handle;
548	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
549		goto cannot_handle;
550	intr_ptr = (unsigned long __user *) (i << 2);
551	if (get_user(segoffs, intr_ptr))
552		goto cannot_handle;
553	if ((segoffs >> 16) == BIOSSEG)
554		goto cannot_handle;
555	pushw(ssp, sp, get_vflags(regs), cannot_handle);
556	pushw(ssp, sp, regs->pt.cs, cannot_handle);
557	pushw(ssp, sp, IP(regs), cannot_handle);
558	regs->pt.cs = segoffs >> 16;
559	SP(regs) -= 6;
560	IP(regs) = segoffs & 0xffff;
561	clear_TF(regs);
562	clear_IF(regs);
563	clear_AC(regs);
564	return;
565
566cannot_handle:
567	save_v86_state(regs, VM86_INTx + (i << 8));
568}
569
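do_int() reads the real-mode interrupt vector table in place: vector i sits at linear address i * 4 and holds a 16:16 segment:offset pair, and real mode turns segment:offset into a linear address as segment * 16 + offset. A tiny standalone sketch of that decoding, using a made-up IVT entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical IVT entry: handler at F000:FF53, the classic BIOS iret stub. */
	uint32_t ivt_entry = 0xf000ff53;
	uint16_t seg = ivt_entry >> 16;
	uint16_t off = ivt_entry & 0xffff;
	uint32_t linear = ((uint32_t)seg << 4) + off;

	printf("vector -> %04x:%04x, linear 0x%05x\n", seg, off, linear);
	return 0;
}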
570int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
571{
572	struct vm86 *vm86 = current->thread.vm86;
573
574	if (vm86->vm86plus.is_vm86pus) {
575		if ((trapno == 3) || (trapno == 1)) {
576			save_v86_state(regs, VM86_TRAP + (trapno << 8));
577			return 0;
578		}
579		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
580		return 0;
581	}
582	if (trapno != 1)
583		return 1; /* we let this be handled by the calling routine */
584	current->thread.trap_nr = trapno;
585	current->thread.error_code = error_code;
586	force_sig(SIGTRAP);
587	return 0;
588}
589
590void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
591{
592	unsigned char opcode;
593	unsigned char __user *csp;
594	unsigned char __user *ssp;
595	unsigned short ip, sp, orig_flags;
596	int data32, pref_done;
597	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;
598
599#define CHECK_IF_IN_TRAP \
600	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
601		newflags |= X86_EFLAGS_TF
602
603	orig_flags = *(unsigned short *)&regs->pt.flags;
604
605	csp = (unsigned char __user *) (regs->pt.cs << 4);
606	ssp = (unsigned char __user *) (regs->pt.ss << 4);
607	sp = SP(regs);
608	ip = IP(regs);
609
610	data32 = 0;
611	pref_done = 0;
612	do {
613		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
614		case 0x66:      /* 32-bit data */     data32 = 1; break;
615		case 0x67:      /* 32-bit address */  break;
616		case 0x2e:      /* CS */              break;
617		case 0x3e:      /* DS */              break;
618		case 0x26:      /* ES */              break;
619		case 0x36:      /* SS */              break;
620		case 0x65:      /* GS */              break;
621		case 0x64:      /* FS */              break;
622		case 0xf2:      /* repnz */       break;
623		case 0xf3:      /* rep */             break;
624		default: pref_done = 1;
625		}
626	} while (!pref_done);
627
628	switch (opcode) {
629
630	/* pushf */
631	case 0x9c:
632		if (data32) {
633			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
634			SP(regs) -= 4;
635		} else {
636			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
637			SP(regs) -= 2;
638		}
639		IP(regs) = ip;
640		goto vm86_fault_return;
641
642	/* popf */
643	case 0x9d:
644		{
645		unsigned long newflags;
646		if (data32) {
647			newflags = popl(ssp, sp, simulate_sigsegv);
648			SP(regs) += 4;
649		} else {
650			newflags = popw(ssp, sp, simulate_sigsegv);
651			SP(regs) += 2;
652		}
653		IP(regs) = ip;
654		CHECK_IF_IN_TRAP;
655		if (data32)
656			set_vflags_long(newflags, regs);
657		else
658			set_vflags_short(newflags, regs);
659
660		goto check_vip;
661		}
662
663	/* int xx */
664	case 0xcd: {
665		int intno = popb(csp, ip, simulate_sigsegv);
666		IP(regs) = ip;
667		if (vmpi->vm86dbg_active) {
668			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
669				save_v86_state(regs, VM86_INTx + (intno << 8));
670				return;
671			}
672		}
673		do_int(regs, intno, ssp, sp);
674		return;
675	}
676
677	/* iret */
678	case 0xcf:
679		{
680		unsigned long newip;
681		unsigned long newcs;
682		unsigned long newflags;
683		if (data32) {
684			newip = popl(ssp, sp, simulate_sigsegv);
685			newcs = popl(ssp, sp, simulate_sigsegv);
686			newflags = popl(ssp, sp, simulate_sigsegv);
687			SP(regs) += 12;
688		} else {
689			newip = popw(ssp, sp, simulate_sigsegv);
690			newcs = popw(ssp, sp, simulate_sigsegv);
691			newflags = popw(ssp, sp, simulate_sigsegv);
692			SP(regs) += 6;
693		}
694		IP(regs) = newip;
695		regs->pt.cs = newcs;
696		CHECK_IF_IN_TRAP;
697		if (data32) {
698			set_vflags_long(newflags, regs);
699		} else {
700			set_vflags_short(newflags, regs);
701		}
702		goto check_vip;
703		}
704
705	/* cli */
706	case 0xfa:
707		IP(regs) = ip;
708		clear_IF(regs);
709		goto vm86_fault_return;
710
711	/* sti */
712	/*
713	 * Damn. This is incorrect: the 'sti' instruction should actually
714	 * enable interrupts after the /next/ instruction. Not good.
715	 *
716	 * Probably needs some horsing around with the TF flag. Aiee..
717	 */
718	case 0xfb:
719		IP(regs) = ip;
720		set_IF(regs);
721		goto check_vip;
722
723	default:
724		save_v86_state(regs, VM86_UNKNOWN);
725	}
726
727	return;
728
729check_vip:
730	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
731	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
732		save_v86_state(regs, VM86_STI);
733		return;
734	}
735
736vm86_fault_return:
737	if (vmpi->force_return_for_pic  && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
738		save_v86_state(regs, VM86_PICRETURN);
739		return;
740	}
741	if (orig_flags & X86_EFLAGS_TF)
742		handle_vm86_trap(regs, 0, X86_TRAP_DB);
743	return;
744
745simulate_sigsegv:
746	/* FIXME: After a long discussion with Stas we finally
747	 *        agreed, that this is wrong. Here we should
748	 *        really send a SIGSEGV to the user program.
749	 *        But how do we create the correct context? We
750	 *        are inside a general protection fault handler
751 *        and have just returned from a page fault handler.
752	 *        The correct context for the signal handler
753	 *        should be a mixture of the two, but how do we
754	 *        get the information? [KD]
755	 */
756	save_v86_state(regs, VM86_UNKNOWN);
757}
758
759/* ---------------- vm86 special IRQ passing stuff ----------------- */
760
761#define VM86_IRQNAME		"vm86irq"
762
763static struct vm86_irqs {
764	struct task_struct *tsk;
765	int sig;
766} vm86_irqs[16];
767
768static DEFINE_SPINLOCK(irqbits_lock);
769static int irqbits;
770
771#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
772	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
773	| (1 << SIGUNUSED))
774
775static irqreturn_t irq_handler(int intno, void *dev_id)
776{
777	int irq_bit;
778	unsigned long flags;
779
780	spin_lock_irqsave(&irqbits_lock, flags);
781	irq_bit = 1 << intno;
782	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
783		goto out;
784	irqbits |= irq_bit;
785	if (vm86_irqs[intno].sig)
786		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
787	/*
788	 * IRQ will be re-enabled when user asks for the irq (whether
789	 * polling or as a result of the signal)
790	 */
791	disable_irq_nosync(intno);
792	spin_unlock_irqrestore(&irqbits_lock, flags);
793	return IRQ_HANDLED;
794
795out:
796	spin_unlock_irqrestore(&irqbits_lock, flags);
797	return IRQ_NONE;
798}
799
800static inline void free_vm86_irq(int irqnumber)
801{
802	unsigned long flags;
803
804	free_irq(irqnumber, NULL);
805	vm86_irqs[irqnumber].tsk = NULL;
806
807	spin_lock_irqsave(&irqbits_lock, flags);
808	irqbits &= ~(1 << irqnumber);
809	spin_unlock_irqrestore(&irqbits_lock, flags);
810}
811
812void release_vm86_irqs(struct task_struct *task)
813{
814	int i;
815	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
816	    if (vm86_irqs[i].tsk == task)
817		free_vm86_irq(i);
818}
819
820static inline int get_and_reset_irq(int irqnumber)
821{
822	int bit;
823	unsigned long flags;
824	int ret = 0;
825
826	if (invalid_vm86_irq(irqnumber)) return 0;
827	if (vm86_irqs[irqnumber].tsk != current) return 0;
828	spin_lock_irqsave(&irqbits_lock, flags);
829	bit = irqbits & (1 << irqnumber);
830	irqbits &= ~bit;
831	if (bit) {
832		enable_irq(irqnumber);
833		ret = 1;
834	}
835
836	spin_unlock_irqrestore(&irqbits_lock, flags);
837	return ret;
838}
839
840
841static int do_vm86_irq_handling(int subfunction, int irqnumber)
842{
843	int ret;
844	switch (subfunction) {
845		case VM86_GET_AND_RESET_IRQ: {
846			return get_and_reset_irq(irqnumber);
847		}
848		case VM86_GET_IRQ_BITS: {
849			return irqbits;
850		}
851		case VM86_REQUEST_IRQ: {
852			int sig = irqnumber >> 8;
853			int irq = irqnumber & 255;
854			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
855			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
856			if (invalid_vm86_irq(irq)) return -EPERM;
857			if (vm86_irqs[irq].tsk) return -EPERM;
858			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
859			if (ret) return ret;
860			vm86_irqs[irq].sig = sig;
861			vm86_irqs[irq].tsk = current;
862			return irq;
863		}
864		case  VM86_FREE_IRQ: {
865			if (invalid_vm86_irq(irqnumber)) return -EPERM;
866			if (!vm86_irqs[irqnumber].tsk) return 0;
867			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
868			free_vm86_irq(irqnumber);
869			return 0;
870		}
871	}
872	return -EINVAL;
873}
874
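From userspace, the IRQ-passing interface above is driven through the vm86 syscall, with VM86_REQUEST_IRQ packing the signal number into the high byte of the argument, mirroring how do_vm86_irq_handling() unpacks it. A hedged sketch, assuming a 32-bit x86 build, CAP_SYS_ADMIN, and a hypothetical free ISA IRQ:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/vm86.h>

int main(void)
{
	int irq = 3;	/* hypothetical free ISA IRQ */
	long ret = syscall(SYS_vm86, VM86_REQUEST_IRQ, (SIGIO << 8) | irq);

	if (ret < 0) {
		perror("VM86_REQUEST_IRQ");
		return 1;
	}
	/* Deliveries now raise SIGIO; poll and re-arm with VM86_GET_AND_RESET_IRQ. */
	printf("irq %ld routed to this task\n", ret);
	return 0;
}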
v4.10.11 (arch/x86/kernel/vm86_32.c)
 
  1/*
  2 *  Copyright (C) 1994  Linus Torvalds
  3 *
  4 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
  5 *                stack - Manfred Spraul <manfred@colorfullife.com>
  6 *
  7 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
  8 *                them correctly. Now the emulation will be in a
  9 *                consistent state after stackfaults - Kasper Dupont
 10 *                <kasperd@daimi.au.dk>
 11 *
 12 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 13 *                <kasperd@daimi.au.dk>
 14 *
 15 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 16 *                caused by Kasper Dupont's changes - Stas Sergeev
 17 *
 18 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 19 *                Kasper Dupont <kasperd@daimi.au.dk>
 20 *
 21 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 22 *                Kasper Dupont <kasperd@daimi.au.dk>
 23 *
 24 *   9 apr 2002 - Changed stack access macros to jump to a label
 25 *                instead of returning to userspace. This simplifies
 26 *                do_int, and is needed by handle_vm86_fault. Kasper
 27 *                Dupont <kasperd@daimi.au.dk>
 28 *
 29 */
 30
 31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 32
 33#include <linux/capability.h>
 34#include <linux/errno.h>
 35#include <linux/interrupt.h>
 36#include <linux/syscalls.h>
 37#include <linux/sched.h>
 38#include <linux/kernel.h>
 39#include <linux/signal.h>
 40#include <linux/string.h>
 41#include <linux/mm.h>
 42#include <linux/smp.h>
 43#include <linux/highmem.h>
 44#include <linux/ptrace.h>
 45#include <linux/audit.h>
 46#include <linux/stddef.h>
 47#include <linux/slab.h>
 48#include <linux/security.h>
 49
 50#include <linux/uaccess.h>
 51#include <asm/io.h>
 52#include <asm/tlbflush.h>
 53#include <asm/irq.h>
 54#include <asm/traps.h>
 55#include <asm/vm86.h>
 56
 57/*
 58 * Known problems:
 59 *
 60 * Interrupt handling is not guaranteed:
 61 * - a real x86 will disable all interrupts for one instruction
 62 *   after a "mov ss,xx" to make stack handling atomic even without
 63 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 64 *   as the next instruction might result in a page fault or similar.
 65 * - a real x86 will have interrupts disabled for one instruction
 66 *   past the 'sti' that enables them. We don't bother with all the
 67 *   details yet.
 68 *
 69 * Let's hope these problems do not actually matter for anything.
 70 */
 71
 72
 73/*
 74 * 8- and 16-bit register defines..
 75 */
 76#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
 77#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
 78#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
 79#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
 80
 81/*
 82 * virtual flags (16 and 32-bit versions)
 83 */
 84#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
 85#define VEFLAGS	(current->thread.vm86->veflags)
 86
 87#define set_flags(X, new, mask) \
 88((X) = ((X) & ~(mask)) | ((new) & (mask)))
 89
 90#define SAFE_MASK	(0xDD5)
 91#define RETURN_MASK	(0xDFF)
 92
 93void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 94{
 95	struct tss_struct *tss;
 96	struct task_struct *tsk = current;
 97	struct vm86plus_struct __user *user;
 98	struct vm86 *vm86 = current->thread.vm86;
 99	long err = 0;
100
101	/*
102	 * This gets called from entry.S with interrupts disabled, but
103	 * from process context. Enable interrupts here, before trying
104	 * to access user space.
105	 */
106	local_irq_enable();
107
108	if (!vm86 || !vm86->user_vm86) {
109		pr_alert("no user_vm86: BAD\n");
110		do_exit(SIGSEGV);
111	}
112	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
113	user = vm86->user_vm86;
114
115	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
116		       sizeof(struct vm86plus_struct) :
117		       sizeof(struct vm86_struct))) {
118		pr_alert("could not access userspace vm86 info\n");
119		do_exit(SIGSEGV);
120	}
121
122	put_user_try {
123		put_user_ex(regs->pt.bx, &user->regs.ebx);
124		put_user_ex(regs->pt.cx, &user->regs.ecx);
125		put_user_ex(regs->pt.dx, &user->regs.edx);
126		put_user_ex(regs->pt.si, &user->regs.esi);
127		put_user_ex(regs->pt.di, &user->regs.edi);
128		put_user_ex(regs->pt.bp, &user->regs.ebp);
129		put_user_ex(regs->pt.ax, &user->regs.eax);
130		put_user_ex(regs->pt.ip, &user->regs.eip);
131		put_user_ex(regs->pt.cs, &user->regs.cs);
132		put_user_ex(regs->pt.flags, &user->regs.eflags);
133		put_user_ex(regs->pt.sp, &user->regs.esp);
134		put_user_ex(regs->pt.ss, &user->regs.ss);
135		put_user_ex(regs->es, &user->regs.es);
136		put_user_ex(regs->ds, &user->regs.ds);
137		put_user_ex(regs->fs, &user->regs.fs);
138		put_user_ex(regs->gs, &user->regs.gs);
139
140		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
141	} put_user_catch(err);
142	if (err) {
143		pr_alert("could not access userspace vm86 info\n");
144		do_exit(SIGSEGV);
145	}
146
147	tss = &per_cpu(cpu_tss, get_cpu());
148	tsk->thread.sp0 = vm86->saved_sp0;
149	tsk->thread.sysenter_cs = __KERNEL_CS;
150	load_sp0(tss, &tsk->thread);
151	vm86->saved_sp0 = 0;
152	put_cpu();
153
154	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
155
156	lazy_load_gs(vm86->regs32.gs);
157
158	regs->pt.ax = retval;
159}
160
161static void mark_screen_rdonly(struct mm_struct *mm)
162{
163	struct vm_area_struct *vma;
164	spinlock_t *ptl;
165	pgd_t *pgd;
166	pud_t *pud;
167	pmd_t *pmd;
168	pte_t *pte;
169	int i;
170
171	down_write(&mm->mmap_sem);
172	pgd = pgd_offset(mm, 0xA0000);
173	if (pgd_none_or_clear_bad(pgd))
174		goto out;
175	pud = pud_offset(pgd, 0xA0000);
176	if (pud_none_or_clear_bad(pud))
177		goto out;
178	pmd = pmd_offset(pud, 0xA0000);
179
180	if (pmd_trans_huge(*pmd)) {
181		vma = find_vma(mm, 0xA0000);
182		split_huge_pmd(vma, pmd, 0xA0000);
183	}
184	if (pmd_none_or_clear_bad(pmd))
185		goto out;
186	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
187	for (i = 0; i < 32; i++) {
188		if (pte_present(*pte))
189			set_pte(pte, pte_wrprotect(*pte));
190		pte++;
191	}
192	pte_unmap_unlock(pte, ptl);
193out:
194	up_write(&mm->mmap_sem);
195	flush_tlb();
196}
197
198
199
200static int do_vm86_irq_handling(int subfunction, int irqnumber);
201static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
202
203SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
204{
205	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
206}
207
208
209SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
210{
211	switch (cmd) {
212	case VM86_REQUEST_IRQ:
213	case VM86_FREE_IRQ:
214	case VM86_GET_IRQ_BITS:
215	case VM86_GET_AND_RESET_IRQ:
216		return do_vm86_irq_handling(cmd, (int)arg);
217	case VM86_PLUS_INSTALL_CHECK:
218		/*
219		 * NOTE: on old vm86 stuff this will return the error
220		 *  from access_ok(), because the subfunction is
221 *  interpreted as an (invalid) address of a vm86_struct.
222		 *  So the installation check works.
223		 */
224		return 0;
225	}
226
227	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
228	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
229}
230
231
232static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
233{
234	struct tss_struct *tss;
235	struct task_struct *tsk = current;
236	struct vm86 *vm86 = tsk->thread.vm86;
237	struct kernel_vm86_regs vm86regs;
238	struct pt_regs *regs = current_pt_regs();
239	unsigned long err = 0;
240
241	err = security_mmap_addr(0);
242	if (err) {
243		/*
244		 * vm86 cannot virtualize the address space, so vm86 users
245		 * need to manage the low 1MB themselves using mmap.  Given
246		 * that BIOS places important data in the first page, vm86
247		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
248		 * for example, won't even bother trying to use vm86 if it
249		 * can't map a page at virtual address 0.
250		 *
251		 * To reduce the available kernel attack surface, simply
252		 * disallow vm86(old) for users who cannot mmap at va 0.
253		 *
254		 * The implementation of security_mmap_addr will allow
255		 * suitably privileged users to map va 0 even if
256		 * vm.mmap_min_addr is set above 0, and we want this
257		 * behavior for vm86 as well, as it ensures that legacy
258		 * tools like vbetool will not fail just because of
259		 * vm.mmap_min_addr.
260		 */
261		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
262			     current->comm, task_pid_nr(current),
263			     from_kuid_munged(&init_user_ns, current_uid()));
264		return -EPERM;
265	}
266
267	if (!vm86) {
268		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
269			return -ENOMEM;
270		tsk->thread.vm86 = vm86;
271	}
272	if (vm86->saved_sp0)
273		return -EPERM;
274
275	if (!access_ok(VERIFY_READ, user_vm86, plus ?
276		       sizeof(struct vm86_struct) :
277		       sizeof(struct vm86plus_struct)))
278		return -EFAULT;
279
280	memset(&vm86regs, 0, sizeof(vm86regs));
281	get_user_try {
282		unsigned short seg;
283		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
284		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
285		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
286		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
287		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
288		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
289		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
290		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
291		get_user_ex(seg, &user_vm86->regs.cs);
292		vm86regs.pt.cs = seg;
293		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
294		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
295		get_user_ex(seg, &user_vm86->regs.ss);
296		vm86regs.pt.ss = seg;
297		get_user_ex(vm86regs.es, &user_vm86->regs.es);
298		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
299		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
300		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
301
302		get_user_ex(vm86->flags, &user_vm86->flags);
303		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
304		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
305	} get_user_catch(err);
306	if (err)
307		return err;
308
309	if (copy_from_user(&vm86->int_revectored,
310			   &user_vm86->int_revectored,
311			   sizeof(struct revectored_struct)))
312		return -EFAULT;
313	if (copy_from_user(&vm86->int21_revectored,
314			   &user_vm86->int21_revectored,
315			   sizeof(struct revectored_struct)))
316		return -EFAULT;
317	if (plus) {
318		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
319				   sizeof(struct vm86plus_info_struct)))
320			return -EFAULT;
321		vm86->vm86plus.is_vm86pus = 1;
322	} else
323		memset(&vm86->vm86plus, 0,
324		       sizeof(struct vm86plus_info_struct));
325
326	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
327	vm86->user_vm86 = user_vm86;
328
329/*
330 * The flags register is also special: we cannot trust that the user
331 * has set it up safely, so this makes sure interrupt etc flags are
332 * inherited from protected mode.
333 */
334	VEFLAGS = vm86regs.pt.flags;
335	vm86regs.pt.flags &= SAFE_MASK;
336	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
337	vm86regs.pt.flags |= X86_VM_MASK;
338
339	vm86regs.pt.orig_ax = regs->orig_ax;
340
341	switch (vm86->cpu_type) {
342	case CPU_286:
343		vm86->veflags_mask = 0;
344		break;
345	case CPU_386:
346		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
347		break;
348	case CPU_486:
349		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
350		break;
351	default:
352		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
353		break;
354	}
355
356/*
357 * Save old state
358 */
359	vm86->saved_sp0 = tsk->thread.sp0;
360	lazy_save_gs(vm86->regs32.gs);
361
362	tss = &per_cpu(cpu_tss, get_cpu());
363	/* make room for real-mode segments */
364	tsk->thread.sp0 += 16;
365
366	if (static_cpu_has(X86_FEATURE_SEP))
367		tsk->thread.sysenter_cs = 0;
368
369	load_sp0(tss, &tsk->thread);
370	put_cpu();
371
372	if (vm86->flags & VM86_SCREEN_BITMAP)
373		mark_screen_rdonly(tsk->mm);
374
375	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
376	force_iret();
377	return regs->ax;
378}
379
380static inline void set_IF(struct kernel_vm86_regs *regs)
381{
382	VEFLAGS |= X86_EFLAGS_VIF;
383}
384
385static inline void clear_IF(struct kernel_vm86_regs *regs)
386{
387	VEFLAGS &= ~X86_EFLAGS_VIF;
388}
389
390static inline void clear_TF(struct kernel_vm86_regs *regs)
391{
392	regs->pt.flags &= ~X86_EFLAGS_TF;
393}
394
395static inline void clear_AC(struct kernel_vm86_regs *regs)
396{
397	regs->pt.flags &= ~X86_EFLAGS_AC;
398}
399
400/*
401 * It is correct to call set_IF(regs) from the set_vflags_*
402 * functions. However someone forgot to call clear_IF(regs)
403 * in the opposite case.
404 * After the command sequence CLI PUSHF STI POPF you should
405 * end up with interrupts disabled, but you ended up with
406 * interrupts enabled.
407 *  ( I was testing my own changes, but the only bug I
408 *    could find was in a function I had not changed. )
409 * [KD]
410 */
411
412static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
413{
414	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
415	set_flags(regs->pt.flags, flags, SAFE_MASK);
416	if (flags & X86_EFLAGS_IF)
417		set_IF(regs);
418	else
419		clear_IF(regs);
420}
421
422static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
423{
424	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
425	set_flags(regs->pt.flags, flags, SAFE_MASK);
426	if (flags & X86_EFLAGS_IF)
427		set_IF(regs);
428	else
429		clear_IF(regs);
430}
431
432static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
433{
434	unsigned long flags = regs->pt.flags & RETURN_MASK;
435
436	if (VEFLAGS & X86_EFLAGS_VIF)
437		flags |= X86_EFLAGS_IF;
438	flags |= X86_EFLAGS_IOPL;
439	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
440}
441
442static inline int is_revectored(int nr, struct revectored_struct *bitmap)
443{
444	return test_bit(nr, bitmap->__map);
445}
446
447#define val_byte(val, n) (((__u8 *)&val)[n])
448
449#define pushb(base, ptr, val, err_label) \
450	do { \
451		__u8 __val = val; \
452		ptr--; \
453		if (put_user(__val, base + ptr) < 0) \
454			goto err_label; \
455	} while (0)
456
457#define pushw(base, ptr, val, err_label) \
458	do { \
459		__u16 __val = val; \
460		ptr--; \
461		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
462			goto err_label; \
463		ptr--; \
464		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
465			goto err_label; \
466	} while (0)
467
468#define pushl(base, ptr, val, err_label) \
469	do { \
470		__u32 __val = val; \
471		ptr--; \
472		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
473			goto err_label; \
474		ptr--; \
475		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
476			goto err_label; \
477		ptr--; \
478		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
479			goto err_label; \
480		ptr--; \
481		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
482			goto err_label; \
483	} while (0)
484
485#define popb(base, ptr, err_label) \
486	({ \
487		__u8 __res; \
488		if (get_user(__res, base + ptr) < 0) \
489			goto err_label; \
490		ptr++; \
491		__res; \
492	})
493
494#define popw(base, ptr, err_label) \
495	({ \
496		__u16 __res; \
497		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
498			goto err_label; \
499		ptr++; \
500		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
501			goto err_label; \
502		ptr++; \
503		__res; \
504	})
505
506#define popl(base, ptr, err_label) \
507	({ \
508		__u32 __res; \
509		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
510			goto err_label; \
511		ptr++; \
512		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
513			goto err_label; \
514		ptr++; \
515		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
516			goto err_label; \
517		ptr++; \
518		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
519			goto err_label; \
520		ptr++; \
521		__res; \
522	})
523
524/* There are so many possible reasons for this function to return
525 * VM86_INTx, so adding another doesn't bother me. We can expect
526 * userspace programs to be able to handle it. (Getting a problem
527 * in userspace is always better than an Oops anyway.) [KD]
528 */
529static void do_int(struct kernel_vm86_regs *regs, int i,
530    unsigned char __user *ssp, unsigned short sp)
531{
532	unsigned long __user *intr_ptr;
533	unsigned long segoffs;
534	struct vm86 *vm86 = current->thread.vm86;
535
536	if (regs->pt.cs == BIOSSEG)
537		goto cannot_handle;
538	if (is_revectored(i, &vm86->int_revectored))
539		goto cannot_handle;
540	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
541		goto cannot_handle;
542	intr_ptr = (unsigned long __user *) (i << 2);
543	if (get_user(segoffs, intr_ptr))
544		goto cannot_handle;
545	if ((segoffs >> 16) == BIOSSEG)
546		goto cannot_handle;
547	pushw(ssp, sp, get_vflags(regs), cannot_handle);
548	pushw(ssp, sp, regs->pt.cs, cannot_handle);
549	pushw(ssp, sp, IP(regs), cannot_handle);
550	regs->pt.cs = segoffs >> 16;
551	SP(regs) -= 6;
552	IP(regs) = segoffs & 0xffff;
553	clear_TF(regs);
554	clear_IF(regs);
555	clear_AC(regs);
556	return;
557
558cannot_handle:
559	save_v86_state(regs, VM86_INTx + (i << 8));
560}
561
562int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
563{
564	struct vm86 *vm86 = current->thread.vm86;
565
566	if (vm86->vm86plus.is_vm86pus) {
567		if ((trapno == 3) || (trapno == 1)) {
568			save_v86_state(regs, VM86_TRAP + (trapno << 8));
569			return 0;
570		}
571		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
572		return 0;
573	}
574	if (trapno != 1)
575		return 1; /* we let this be handled by the calling routine */
576	current->thread.trap_nr = trapno;
577	current->thread.error_code = error_code;
578	force_sig(SIGTRAP, current);
579	return 0;
580}
581
582void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
583{
584	unsigned char opcode;
585	unsigned char __user *csp;
586	unsigned char __user *ssp;
587	unsigned short ip, sp, orig_flags;
588	int data32, pref_done;
589	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;
590
591#define CHECK_IF_IN_TRAP \
592	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
593		newflags |= X86_EFLAGS_TF
594
595	orig_flags = *(unsigned short *)&regs->pt.flags;
596
597	csp = (unsigned char __user *) (regs->pt.cs << 4);
598	ssp = (unsigned char __user *) (regs->pt.ss << 4);
599	sp = SP(regs);
600	ip = IP(regs);
601
602	data32 = 0;
603	pref_done = 0;
604	do {
605		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
606		case 0x66:      /* 32-bit data */     data32 = 1; break;
607		case 0x67:      /* 32-bit address */  break;
608		case 0x2e:      /* CS */              break;
609		case 0x3e:      /* DS */              break;
610		case 0x26:      /* ES */              break;
611		case 0x36:      /* SS */              break;
612		case 0x65:      /* GS */              break;
613		case 0x64:      /* FS */              break;
614		case 0xf2:      /* repnz */       break;
615		case 0xf3:      /* rep */             break;
616		default: pref_done = 1;
617		}
618	} while (!pref_done);
619
620	switch (opcode) {
621
622	/* pushf */
623	case 0x9c:
624		if (data32) {
625			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
626			SP(regs) -= 4;
627		} else {
628			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
629			SP(regs) -= 2;
630		}
631		IP(regs) = ip;
632		goto vm86_fault_return;
633
634	/* popf */
635	case 0x9d:
636		{
637		unsigned long newflags;
638		if (data32) {
639			newflags = popl(ssp, sp, simulate_sigsegv);
640			SP(regs) += 4;
641		} else {
642			newflags = popw(ssp, sp, simulate_sigsegv);
643			SP(regs) += 2;
644		}
645		IP(regs) = ip;
646		CHECK_IF_IN_TRAP;
647		if (data32)
648			set_vflags_long(newflags, regs);
649		else
650			set_vflags_short(newflags, regs);
651
652		goto check_vip;
653		}
654
655	/* int xx */
656	case 0xcd: {
657		int intno = popb(csp, ip, simulate_sigsegv);
658		IP(regs) = ip;
659		if (vmpi->vm86dbg_active) {
660			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
661				save_v86_state(regs, VM86_INTx + (intno << 8));
662				return;
663			}
664		}
665		do_int(regs, intno, ssp, sp);
666		return;
667	}
668
669	/* iret */
670	case 0xcf:
671		{
672		unsigned long newip;
673		unsigned long newcs;
674		unsigned long newflags;
675		if (data32) {
676			newip = popl(ssp, sp, simulate_sigsegv);
677			newcs = popl(ssp, sp, simulate_sigsegv);
678			newflags = popl(ssp, sp, simulate_sigsegv);
679			SP(regs) += 12;
680		} else {
681			newip = popw(ssp, sp, simulate_sigsegv);
682			newcs = popw(ssp, sp, simulate_sigsegv);
683			newflags = popw(ssp, sp, simulate_sigsegv);
684			SP(regs) += 6;
685		}
686		IP(regs) = newip;
687		regs->pt.cs = newcs;
688		CHECK_IF_IN_TRAP;
689		if (data32) {
690			set_vflags_long(newflags, regs);
691		} else {
692			set_vflags_short(newflags, regs);
693		}
694		goto check_vip;
695		}
696
697	/* cli */
698	case 0xfa:
699		IP(regs) = ip;
700		clear_IF(regs);
701		goto vm86_fault_return;
702
703	/* sti */
704	/*
705	 * Damn. This is incorrect: the 'sti' instruction should actually
706	 * enable interrupts after the /next/ instruction. Not good.
707	 *
708	 * Probably needs some horsing around with the TF flag. Aiee..
709	 */
710	case 0xfb:
711		IP(regs) = ip;
712		set_IF(regs);
713		goto check_vip;
714
715	default:
716		save_v86_state(regs, VM86_UNKNOWN);
717	}
718
719	return;
720
721check_vip:
722	if (VEFLAGS & X86_EFLAGS_VIP) {
723		save_v86_state(regs, VM86_STI);
724		return;
725	}
726
727vm86_fault_return:
728	if (vmpi->force_return_for_pic  && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
729		save_v86_state(regs, VM86_PICRETURN);
730		return;
731	}
732	if (orig_flags & X86_EFLAGS_TF)
733		handle_vm86_trap(regs, 0, X86_TRAP_DB);
734	return;
735
736simulate_sigsegv:
737	/* FIXME: After a long discussion with Stas we finally
738	 *        agreed, that this is wrong. Here we should
739	 *        really send a SIGSEGV to the user program.
740	 *        But how do we create the correct context? We
741	 *        are inside a general protection fault handler
742 *        and have just returned from a page fault handler.
743	 *        The correct context for the signal handler
744	 *        should be a mixture of the two, but how do we
745	 *        get the information? [KD]
746	 */
747	save_v86_state(regs, VM86_UNKNOWN);
748}
749
750/* ---------------- vm86 special IRQ passing stuff ----------------- */
751
752#define VM86_IRQNAME		"vm86irq"
753
754static struct vm86_irqs {
755	struct task_struct *tsk;
756	int sig;
757} vm86_irqs[16];
758
759static DEFINE_SPINLOCK(irqbits_lock);
760static int irqbits;
761
762#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
763	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
764	| (1 << SIGUNUSED))
765
766static irqreturn_t irq_handler(int intno, void *dev_id)
767{
768	int irq_bit;
769	unsigned long flags;
770
771	spin_lock_irqsave(&irqbits_lock, flags);
772	irq_bit = 1 << intno;
773	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
774		goto out;
775	irqbits |= irq_bit;
776	if (vm86_irqs[intno].sig)
777		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
778	/*
779	 * IRQ will be re-enabled when user asks for the irq (whether
780	 * polling or as a result of the signal)
781	 */
782	disable_irq_nosync(intno);
783	spin_unlock_irqrestore(&irqbits_lock, flags);
784	return IRQ_HANDLED;
785
786out:
787	spin_unlock_irqrestore(&irqbits_lock, flags);
788	return IRQ_NONE;
789}
790
791static inline void free_vm86_irq(int irqnumber)
792{
793	unsigned long flags;
794
795	free_irq(irqnumber, NULL);
796	vm86_irqs[irqnumber].tsk = NULL;
797
798	spin_lock_irqsave(&irqbits_lock, flags);
799	irqbits &= ~(1 << irqnumber);
800	spin_unlock_irqrestore(&irqbits_lock, flags);
801}
802
803void release_vm86_irqs(struct task_struct *task)
804{
805	int i;
806	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
807	    if (vm86_irqs[i].tsk == task)
808		free_vm86_irq(i);
809}
810
811static inline int get_and_reset_irq(int irqnumber)
812{
813	int bit;
814	unsigned long flags;
815	int ret = 0;
816
817	if (invalid_vm86_irq(irqnumber)) return 0;
818	if (vm86_irqs[irqnumber].tsk != current) return 0;
819	spin_lock_irqsave(&irqbits_lock, flags);
820	bit = irqbits & (1 << irqnumber);
821	irqbits &= ~bit;
822	if (bit) {
823		enable_irq(irqnumber);
824		ret = 1;
825	}
826
827	spin_unlock_irqrestore(&irqbits_lock, flags);
828	return ret;
829}
830
831
832static int do_vm86_irq_handling(int subfunction, int irqnumber)
833{
834	int ret;
835	switch (subfunction) {
836		case VM86_GET_AND_RESET_IRQ: {
837			return get_and_reset_irq(irqnumber);
838		}
839		case VM86_GET_IRQ_BITS: {
840			return irqbits;
841		}
842		case VM86_REQUEST_IRQ: {
843			int sig = irqnumber >> 8;
844			int irq = irqnumber & 255;
845			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
846			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
847			if (invalid_vm86_irq(irq)) return -EPERM;
848			if (vm86_irqs[irq].tsk) return -EPERM;
849			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
850			if (ret) return ret;
851			vm86_irqs[irq].sig = sig;
852			vm86_irqs[irq].tsk = current;
853			return irq;
854		}
855		case  VM86_FREE_IRQ: {
856			if (invalid_vm86_irq(irqnumber)) return -EPERM;
857			if (!vm86_irqs[irqnumber].tsk) return 0;
858			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
859			free_vm86_irq(irqnumber);
860			return 0;
861		}
862	}
863	return -EINVAL;
864}
865