// SPDX-License-Identifier: GPL-2.0
/*
 * In-kernel vector facility support functions
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu.h>

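/*
 * The KERNEL_* flags select which parts of the FPU/vector state are
 * saved and restored.  A sketch of their layout, assuming the
 * definitions in <asm/fpu.h>: KERNEL_FPC covers the floating-point
 * control register; KERNEL_VXR_V0V7, _V8V15, _V16V23 and _V24V31 each
 * cover a bank of eight vector registers; KERNEL_VXR_LOW (V0-V15),
 * KERNEL_VXR_MID (V8-V23), KERNEL_VXR_HIGH (V16-V31) and KERNEL_VXR
 * (V0-V31) combine those banks as used below.
 */
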
void __kernel_fpu_begin(struct kernel_fpu *state, int flags)
{
	__vector128 *vxrs = state->vxrs;
	int mask;

	/*
	 * Limit the save to the FPU/vector registers already
	 * in use by the previous context.
	 */
	flags &= state->hdr.mask;
	if (flags & KERNEL_FPC)
		fpu_stfpc(&state->hdr.fpc);
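	/*
	 * Without the vector facility only the 16 floating-point
	 * registers exist; they occupy the leftmost halves of V0-V15,
	 * so a request for any low vector registers falls back to
	 * saving the FP register set.
	 */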
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_LOW)
			save_fp_regs_vx(vxrs);
		return;
	}
	mask = flags & KERNEL_VXR;
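	/*
	 * fpu_vstm(m, n, addr) stores vector registers Vm-Vn at addr and
	 * returns the number of registers stored, so vxrs always advances
	 * just past the area written.
	 */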
	if (mask == KERNEL_VXR) {
		vxrs += fpu_vstm(0, 15, vxrs);
		vxrs += fpu_vstm(16, 31, vxrs);
		return;
	}
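	/* KERNEL_VXR_MID (V8-V23) is one contiguous range: a single store suffices. */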
	if (mask == KERNEL_VXR_MID) {
		vxrs += fpu_vstm(8, 23, vxrs);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			vxrs += fpu_vstm(0, 15, vxrs);
		else if (mask == KERNEL_VXR_V0V7)
			vxrs += fpu_vstm(0, 7, vxrs);
		else
			vxrs += fpu_vstm(8, 15, vxrs);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			vxrs += fpu_vstm(16, 31, vxrs);
		else if (mask == KERNEL_VXR_V16V23)
			vxrs += fpu_vstm(16, 23, vxrs);
		else
			vxrs += fpu_vstm(24, 31, vxrs);
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(struct kernel_fpu *state, int flags)
{
	__vector128 *vxrs = state->vxrs;
	int mask;

	/*
	 * Limit the restore to the FPU/vector registers of the
	 * previous context that have been overwritten by the
	 * current context.
	 */
	flags &= state->hdr.mask;
	if (flags & KERNEL_FPC)
		fpu_lfpc(&state->hdr.fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_LOW)
			load_fp_regs_vx(vxrs);
		return;
	}
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		vxrs += fpu_vlm(0, 15, vxrs);
		vxrs += fpu_vlm(16, 31, vxrs);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		vxrs += fpu_vlm(8, 23, vxrs);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			vxrs += fpu_vlm(0, 15, vxrs);
		else if (mask == KERNEL_VXR_V0V7)
			vxrs += fpu_vlm(0, 7, vxrs);
		else
			vxrs += fpu_vlm(8, 15, vxrs);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			vxrs += fpu_vlm(16, 31, vxrs);
		else if (mask == KERNEL_VXR_V16V23)
			vxrs += fpu_vlm(16, 23, vxrs);
		else
			vxrs += fpu_vlm(24, 31, vxrs);
	}
}
EXPORT_SYMBOL(__kernel_fpu_end);

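/*
 * Typical caller pattern for the functions above, as a sketch: the
 * kernel_fpu_begin()/kernel_fpu_end() wrappers and the
 * DECLARE_KERNEL_FPU_ONSTACK16() helper are assumed to come from
 * <asm/fpu.h>, and the flag choice is only an example:
 *
 *	DECLARE_KERNEL_FPU_ONSTACK16(vxstate);
 *
 *	kernel_fpu_begin(&vxstate, KERNEL_FPC | KERNEL_VXR_LOW);
 *	// ... use the FPC and V0-V15 freely here ...
 *	kernel_fpu_end(&vxstate, KERNEL_FPC | KERNEL_VXR_LOW);
 *
 * The flags passed to begin and end must match, and the on-stack area
 * must provide a slot for every requested register (16 here).
 */

/*
 * load_fpu_state() and save_fpu_state() below operate on a task's
 * complete struct fpu, so each register bank has a fixed slot in the
 * vxrs array; unlike the kernel_fpu functions above, they do not pack
 * the requested registers contiguously into a smaller save area.
 */
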
void load_fpu_state(struct fpu *state, int flags)
{
	__vector128 *vxrs = &state->vxrs[0];
	int mask;

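	/*
	 * Use the exception-safe FPC load here: the saved FPC may stem
	 * from user space (e.g. a signal frame) and might hold an
	 * invalid value that a plain load would trap on.
	 */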
	if (flags & KERNEL_FPC)
		fpu_lfpc_safe(&state->fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_LOW)
			load_fp_regs_vx(state->vxrs);
		return;
	}
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		fpu_vlm(0, 15, &vxrs[0]);
		fpu_vlm(16, 31, &vxrs[16]);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		fpu_vlm(8, 23, &vxrs[8]);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			fpu_vlm(0, 15, &vxrs[0]);
		else if (mask == KERNEL_VXR_V0V7)
			fpu_vlm(0, 7, &vxrs[0]);
		else
			fpu_vlm(8, 15, &vxrs[8]);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			fpu_vlm(16, 31, &vxrs[16]);
		else if (mask == KERNEL_VXR_V16V23)
			fpu_vlm(16, 23, &vxrs[16]);
		else
			fpu_vlm(24, 31, &vxrs[24]);
	}
}

void save_fpu_state(struct fpu *state, int flags)
{
	__vector128 *vxrs = &state->vxrs[0];
	int mask;

	if (flags & KERNEL_FPC)
		fpu_stfpc(&state->fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_LOW)
			save_fp_regs_vx(state->vxrs);
		return;
	}
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		fpu_vstm(0, 15, &vxrs[0]);
		fpu_vstm(16, 31, &vxrs[16]);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		fpu_vstm(8, 23, &vxrs[8]);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			fpu_vstm(0, 15, &vxrs[0]);
		else if (mask == KERNEL_VXR_V0V7)
			fpu_vstm(0, 7, &vxrs[0]);
		else
			fpu_vstm(8, 15, &vxrs[8]);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			fpu_vstm(16, 31, &vxrs[16]);
		else if (mask == KERNEL_VXR_V16V23)
			fpu_vstm(16, 23, &vxrs[16]);
		else
			fpu_vstm(24, 31, &vxrs[24]);
	}
}
EXPORT_SYMBOL(save_fpu_state);