arch/s390/include/asm/switch_to.h — Linux kernel source header.
Two historical revisions of the same file follow: first the variant as of
kernel v3.5.6, then (below) the variant as of kernel v4.17.

Revision: v3.5.6
 
  1/*
  2 * Copyright IBM Corp. 1999, 2009
  3 *
  4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  5 */
  6
  7#ifndef __ASM_SWITCH_TO_H
  8#define __ASM_SWITCH_TO_H
  9
 10#include <linux/thread_info.h>
 
 
 
 11
 12extern struct task_struct *__switch_to(void *, void *);
 13extern void update_per_regs(struct task_struct *task);
 14
/*
 * save_fp_regs - store the floating point register state of the
 * current CPU into the given s390_fp_regs save area.
 *
 * The first asm stores f0, f2, f4 and f6, which are present on every
 * machine generation.  Without the IEEE floating point facility
 * (MACHINE_HAS_IEEE false) that is the complete state and we return
 * early; otherwise the second asm additionally stores the floating
 * point control register (stfpc, written at offset 0 of the operand)
 * and the remaining twelve registers.
 *
 * NOTE(review): %O0/%R0 are the s390 operand modifiers that expand to
 * the displacement and base register of the "Q" memory operand, so the
 * literal offsets (+8, +16, ...) address individual register slots of
 * *fpregs relative to the start of the structure — the layout of
 * s390_fp_regs is assumed, confirm against its definition.
 */
 15static inline void save_fp_regs(s390_fp_regs *fpregs)
 16{
 17	asm volatile(
 18		"	std	0,%O0+8(%R0)\n"
 19		"	std	2,%O0+24(%R0)\n"
 20		"	std	4,%O0+40(%R0)\n"
 21		"	std	6,%O0+56(%R0)"
 22		: "=Q" (*fpregs) : "Q" (*fpregs));
 23	if (!MACHINE_HAS_IEEE)
 24		return;
 25	asm volatile(
 26		"	stfpc	%0\n"
 27		"	std	1,%O0+16(%R0)\n"
 28		"	std	3,%O0+32(%R0)\n"
 29		"	std	5,%O0+48(%R0)\n"
 30		"	std	7,%O0+64(%R0)\n"
 31		"	std	8,%O0+72(%R0)\n"
 32		"	std	9,%O0+80(%R0)\n"
 33		"	std	10,%O0+88(%R0)\n"
 34		"	std	11,%O0+96(%R0)\n"
 35		"	std	12,%O0+104(%R0)\n"
 36		"	std	13,%O0+112(%R0)\n"
 37		"	std	14,%O0+120(%R0)\n"
 38		"	std	15,%O0+128(%R0)\n"
 39		: "=Q" (*fpregs) : "Q" (*fpregs));
 40}
 41
/*
 * restore_fp_regs - load the floating point register state of the
 * current CPU from the given s390_fp_regs save area; the exact
 * mirror of save_fp_regs.
 *
 * f0, f2, f4 and f6 are loaded unconditionally; on machines with the
 * IEEE floating point facility the floating point control register
 * (lfpc) and the remaining twelve registers are loaded as well.
 * Only input constraints are used here since the asm reads memory
 * and clobbers no C-visible object.
 */
 42static inline void restore_fp_regs(s390_fp_regs *fpregs)
 43{
 44	asm volatile(
 45		"	ld	0,%O0+8(%R0)\n"
 46		"	ld	2,%O0+24(%R0)\n"
 47		"	ld	4,%O0+40(%R0)\n"
 48		"	ld	6,%O0+56(%R0)"
 49		: : "Q" (*fpregs));
 50	if (!MACHINE_HAS_IEEE)
 51		return;
 52	asm volatile(
 53		"	lfpc	%0\n"
 54		"	ld	1,%O0+16(%R0)\n"
 55		"	ld	3,%O0+32(%R0)\n"
 56		"	ld	5,%O0+48(%R0)\n"
 57		"	ld	7,%O0+64(%R0)\n"
 58		"	ld	8,%O0+72(%R0)\n"
 59		"	ld	9,%O0+80(%R0)\n"
 60		"	ld	10,%O0+88(%R0)\n"
 61		"	ld	11,%O0+96(%R0)\n"
 62		"	ld	12,%O0+104(%R0)\n"
 63		"	ld	13,%O0+112(%R0)\n"
 64		"	ld	14,%O0+120(%R0)\n"
 65		"	ld	15,%O0+128(%R0)\n"
 66		: : "Q" (*fpregs));
 67}
 68
/*
 * save_access_regs - store all 16 access registers (a0-a15) into the
 * acrs array with a single stam instruction.
 *
 * NOTE(review): the "=Q" (*acrs) operand only describes the first
 * unsigned int even though stam writes 16 words; the v4.17 revision
 * of this header widens the operand via a NUM_ACRS-sized struct.
 */
 69static inline void save_access_regs(unsigned int *acrs)
 70{
 71	asm volatile("stam 0,15,%0" : "=Q" (*acrs));


 72}
 73
/*
 * restore_access_regs - load all 16 access registers (a0-a15) from
 * the acrs array with a single lam instruction; mirror of
 * save_access_regs.
 *
 * NOTE(review): as with save_access_regs, the "Q" (*acrs) operand
 * only describes the first word of the 16 that lam reads.
 */
 74static inline void restore_access_regs(unsigned int *acrs)
 75{
 76	asm volatile("lam 0,15,%0" : : "Q" (*acrs));


 77}
 78
/*
 * switch_to - perform a context switch from @prev to @next.
 *
 * Floating point and access register state is saved/restored only for
 * tasks that own a user address space (task->mm != NULL); tasks
 * without one (kernel threads) carry no such user state here.
 * update_per_regs() (declared above, defined elsewhere) refreshes
 * per-task control register state for the incoming task before
 * __switch_to() performs the low-level switch and returns the task
 * that was previously running, which is assigned back to @prev.
 * The (last) argument is part of the generic scheduler interface but
 * is not referenced by this implementation.
 */
 79#define switch_to(prev,next,last) do {					\
 80	if (prev->mm) {							\
 81		save_fp_regs(&prev->thread.fp_regs);			\
 82		save_access_regs(&prev->thread.acrs[0]);		\
 83	}								\
 84	if (next->mm) {							\
 85		restore_fp_regs(&next->thread.fp_regs);			\
 86		restore_access_regs(&next->thread.acrs[0]);		\
 87		update_per_regs(next);					\
 88	}								\
 89	prev = __switch_to(prev,next);					\
 90} while (0)
 91
 92extern void account_vtime(struct task_struct *, struct task_struct *);
 93extern void account_tick_vtime(struct task_struct *);
 94
/*
 * finish_arch_switch - architecture hook run after the scheduler has
 * completed the switch to @current: re-establish the new task's
 * address-space limit (set_fs) and charge CPU time via
 * account_vtime() (declared above; presumably s390 virtual-time
 * accounting — confirm against its definition).
 */
 95#define finish_arch_switch(prev) do {					     \
 96	set_fs(current->thread.mm_segment);				     \
 97	account_vtime(prev, current);					     \
 98} while (0)
 99
100#endif /* __ASM_SWITCH_TO_H */
Revision: v4.17 — same header later in the kernel's history; the FP
save/restore helpers have moved elsewhere (save_fpu_regs() in
<asm/fpu/api.h>) and the access-register helpers now describe the full
register array to the compiler.
 1/* SPDX-License-Identifier: GPL-2.0 */
 2/*
 3 * Copyright IBM Corp. 1999, 2009
 4 *
 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 6 */
 7
 8#ifndef __ASM_SWITCH_TO_H
 9#define __ASM_SWITCH_TO_H
10
11#include <linux/thread_info.h>
12#include <asm/fpu/api.h>
13#include <asm/ptrace.h>
14#include <asm/guarded_storage.h>
15
16extern struct task_struct *__switch_to(void *, void *);
17extern void update_cr_regs(struct task_struct *task);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
/*
 * save_access_regs - store all 16 access registers (a0-a15) into the
 * acrs array using a single stam instruction.
 *
 * The local acrstype struct describes the full NUM_ACRS-word extent of
 * the destination, so the "=Q" memory operand covers everything stam
 * writes (a plain *acrs would only describe the first word).
 */
19static inline void save_access_regs(unsigned int *acrs)
20{
21	typedef struct { int _[NUM_ACRS]; } acrstype;
22
23	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
24}
25
/*
 * restore_access_regs - load all 16 access registers (a0-a15) from the
 * acrs array using a single lam instruction; mirror of
 * save_access_regs.
 *
 * The acrstype cast tells the compiler that the "Q" input operand
 * spans all NUM_ACRS words that lam reads, not just the first.
 */
26static inline void restore_access_regs(unsigned int *acrs)
27{
28	typedef struct { int _[NUM_ACRS]; } acrstype;
29
30	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
31}
32
/*
 * switch_to - perform a context switch from @prev to @next.
 *
 * All of @prev's per-task register state is saved first: FPU/vector
 * state via save_fpu_regs() (see the inline comment on CIF_FPU below),
 * the access registers, the runtime-instrumentation (ri) control block
 * and the guarded-storage (gs) control block.  The control registers
 * and the corresponding state of @next are then loaded before
 * __switch_to() performs the low-level switch and returns the task
 * that was previously running, which is assigned back to @prev.
 * The (last) argument is part of the generic scheduler interface but
 * is not referenced by this implementation.
 */
33#define switch_to(prev, next, last) do {				\
34	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
35	 * a restore of the floating point / vector registers as	\
36	 * soon as the next task returns to user space			\
37	 */								\
38	save_fpu_regs();						\
39	save_access_regs(&prev->thread.acrs[0]);			\
40	save_ri_cb(prev->thread.ri_cb);					\
41	save_gs_cb(prev->thread.gs_cb);					\
42	update_cr_regs(next);						\
43	restore_access_regs(&next->thread.acrs[0]);			\
44	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
45	restore_gs_cb(next->thread.gs_cb);				\
46	prev = __switch_to(prev, next);					\




47} while (0)
48
49#endif /* __ASM_SWITCH_TO_H */