// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *		 stack - Manfred Spraul <manfred@colorfullife.com>
 *
 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *		 them correctly. Now the emulation will be in a
 *		 consistent state after stackfaults - Kasper Dupont
 *		 <kasperd@daimi.au.dk>
 *
 * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *		 <kasperd@daimi.au.dk>
 *
 * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *		 caused by Kasper Dupont's changes - Stas Sergeev
 *
 *  4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *		 Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *		 Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed stack access macros to jump to a label
 *		 instead of returning to userspace. This simplifies
 *		 do_int, and is needed by handle_vm86_fault. Kasper
 *		 Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)
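
/*
 * VFLAGS aliases the low 16 bits of VEFLAGS by casting through an
 * unsigned short pointer, which works because x86 is little-endian;
 * the 16-bit flag helpers further down depend on that layout.
 */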

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
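
/*
 * SAFE_MASK (0xDD5) is OF|DF|TF|SF|ZF|AF|PF|CF: the EFLAGS bits a
 * vm86 task may set directly. IF (bit 9) and IOPL (bits 12-13) are
 * deliberately excluded and are emulated via VIF and veflags_mask
 * instead. RETURN_MASK (0xDFF) additionally passes the reserved low
 * bits through, but still filters IF, which get_vflags() synthesizes
 * from VIF.
 */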

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_sp0(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}

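/*
 * Write-protect the 32 pages at 0xA0000-0xBFFFF (the legacy VGA
 * window) so that guest stores to screen memory fault; the fault path
 * can then track dirty pages through screen_bitmap, which is what
 * VM86_SCREEN_BITMAP requests.
 */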
static void mark_screen_rdonly(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	p4d = p4d_offset(pgd, 0xA0000);
	if (p4d_none_or_clear_bad(p4d))
		goto out;
	pud = pud_offset(p4d, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);

	if (pmd_trans_huge(*pmd)) {
		vma = find_vma(mm, 0xA0000);
		split_huge_pmd(vma, pmd, 0xA0000);
	}
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
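
/*
 * For reference, entering vm86 from userspace looks roughly like the
 * hypothetical sketch below (the register values are made-up examples);
 * emulators such as DOSEMU map the low 1MB themselves and then issue
 * the syscall directly:
 *
 *	struct vm86plus_struct v = { 0 };
 *	v.regs.cs = 0x0000;  v.regs.eip = 0x7c00;  // example entry point
 *	v.regs.ss = 0x0000;  v.regs.esp = 0x7c00;
 *	int ret = syscall(SYS_vm86, VM86_ENTER, &v);
 *	// ret encodes why we came back, e.g. VM86_INTx + (intno << 8)
 */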


static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap. Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0. DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, user_vm86, plus ?
		       sizeof(struct vm86_struct) :
		       sizeof(struct vm86plus_struct)))
		return -EFAULT;

	memset(&vm86regs, 0, sizeof(vm86regs));
	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
		get_user_ex(seg, &user_vm86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
		get_user_ex(seg, &user_vm86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &user_vm86->regs.es);
		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

		get_user_ex(vm86->flags, &user_vm86->flags);
		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;
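	/*
	 * Concretely: the guest's requested IF survives only in VEFLAGS
	 * (feeding the VIF emulation), the live flags keep the kernel's
	 * own IF/IOPL, and only the SAFE_MASK bits come from the guest.
	 * X86_VM_MASK is what actually switches the CPU into vm86 mode
	 * on the return to userspace.
	 */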

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}
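	/*
	 * veflags_mask tracks the emulated CPU generation: none of
	 * these bits are exposed to a guest that asked for a 286, the
	 * 386 adds NT and IOPL, the 486 adds AC, and anything newer
	 * also gets ID (the bit CPUID-detection code toggles). Masked
	 * bits are reflected back to the guest by get_vflags().
	 */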

	/*
	 * Save old state
	 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	/* make room for real-mode segments */
	preempt_disable();
	tsk->thread.sp0 += 16;

	if (static_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_sp0(tsk);
	preempt_enable();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	force_iret();
	return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 * ( I was testing my own changes, but the only bug I
 * could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
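
/*
 * The push/pop helpers above go byte-by-byte because 'ptr' is a 16-bit
 * variable at every call site: its increments and decrements wrap
 * around within the 64K segment the way a real-mode SP/IP does, which
 * a single multi-byte put_user()/get_user() could not reproduce.
 */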

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
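/*
 * do_int() dispatches a software interrupt the way a real-mode CPU
 * would: entry i of the interrupt vector table lives at linear address
 * i * 4, with the handler offset in the low word and its segment in
 * the high word; FLAGS, CS and IP are pushed (6 bytes, hence the
 * SP(regs) -= 6) before jumping there with TF, IF and AC cleared.
 */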
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let the calling routine handle this */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
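	/*
	 * CHECK_IF_IN_TRAP expands textually and relies on a local
	 * 'newflags' variable existing at each expansion site below.
	 * vm86dbg_TFpendig is the historically misspelled UAPI field
	 * name and must stay spelled exactly this way.
	 */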

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;
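
/*
 * irqbits holds one bit per IRQ line (0-15) that has fired but not yet
 * been collected via VM86_GET_AND_RESET_IRQ. It is protected by
 * irqbits_lock, and a set bit also means the line is currently
 * disabled until userspace collects it.
 */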

#define ALLOWED_SIGS	(1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}