Linux Audio

Check our new training course

Loading...
v5.9
 1/* SPDX-License-Identifier: GPL-2.0 */
 2/*
 3 * Low-level task switching. This is based on information published in
 4 * the Processor Abstraction Layer and the System Abstraction Layer
 5 * manual.
 6 *
 7 * Copyright (C) 1998-2003 Hewlett-Packard Co
 8 *	David Mosberger-Tang <davidm@hpl.hp.com>
 9 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
10 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
11 */
12#ifndef _ASM_IA64_SWITCH_TO_H
13#define _ASM_IA64_SWITCH_TO_H
14
15#include <linux/percpu.h>
16
17struct task_struct;
18
/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
/* Low-level switch; implemented in assembly.  Returns the previous task. */
extern struct task_struct *ia64_switch_to (void *next_task);

/* Save/load the "extra" per-task state (debug registers, perfmon). */
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
33
 
 
 
 
 
 
 
/*
 * PERFMON_IS_SYSWIDE(): non-zero when bit 0 of this CPU's pfm_syst_info
 * is set, i.e. a system-wide perfmon session is active on this CPU.
 * Always 0 when perfmon support is compiled out.
 */
#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
40
/*
 * A task needs ia64_save_extra()/ia64_load_extra() treatment when it
 * carries valid debug-register or perfmon state, or when a system-wide
 * perfmon session is running on this CPU.
 */
#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || PERFMON_IS_SYSWIDE())
44
/*
 * Core switch: save/load any "extra" state around the low-level switch,
 * and set the incoming task's dfh (disabled-fph) bit unless it already
 * owns the local FPU, so a later fph access faults and lazily restores.
 * (Stray blank line from the scrape removed: it broke the '\'
 * line continuation in the middle of the macro.)
 */
#define __switch_to(prev,next,last) do {							 \
	if (IA64_HAS_EXTRA_STATE(prev))								 \
		ia64_save_extra(prev);								 \
	if (IA64_HAS_EXTRA_STATE(next))								 \
		ia64_load_extra(next);								 \
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
	(last) = ia64_switch_to((next));							 \
} while (0)
53
#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 *
 * (Stray blank line from the scrape removed: it broke the '\' line
 * continuation inside the macro.  In this v5.9 copy the migration test
 * intentionally only refreshes last_cpu — there is no platform hook.)
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {				\
		ia64_psr(task_pt_regs(prev))->mfh = 0;			\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
	/* "next" in old context is "current" in new context */			\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	       \
		     (task_cpu(current) !=				       \
		      		      task_thread_info(current)->last_cpu))) { \
		task_thread_info(current)->last_cpu = task_cpu(current);       \
	}								       \
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif
78
79#endif /* _ASM_IA64_SWITCH_TO_H */
v3.5.6 — older revision of the same header, duplicated below for comparison (note it defines the same _ASM_IA64_SWITCH_TO_H include guard as the copy above)
 
 1/*
 2 * Low-level task switching. This is based on information published in
 3 * the Processor Abstraction Layer and the System Abstraction Layer
 4 * manual.
 5 *
 6 * Copyright (C) 1998-2003 Hewlett-Packard Co
 7 *	David Mosberger-Tang <davidm@hpl.hp.com>
 8 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 9 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
10 */
11#ifndef _ASM_IA64_SWITCH_TO_H
12#define _ASM_IA64_SWITCH_TO_H
13
14#include <linux/percpu.h>
15
16struct task_struct;
17
/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
/* Low-level switch; implemented in assembly.  Returns the previous task. */
extern struct task_struct *ia64_switch_to (void *next_task);

/* Save/load the "extra" per-task state (debug registers, perfmon). */
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
32
/*
 * Optional hook for precise CPU-time accounting across a context
 * switch; expands to nothing when CONFIG_VIRT_CPU_ACCOUNTING is off.
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
#else
# define IA64_ACCOUNT_ON_SWITCH(p,n)
#endif
39
/*
 * PERFMON_IS_SYSWIDE(): non-zero when bit 0 of this CPU's pfm_syst_info
 * is set, i.e. a system-wide perfmon session is active on this CPU.
 * NOTE(review): uses __get_cpu_var — the pre-3.19 per-CPU accessor; kept
 * as-is since this is the v3.5.6 revision of the header.
 */
#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
46
/*
 * A task needs ia64_save_extra()/ia64_load_extra() treatment when it
 * carries valid debug-register or perfmon state, or when a system-wide
 * perfmon session is running on this CPU.
 */
#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || PERFMON_IS_SYSWIDE())
50
/*
 * Core switch: run the optional accounting hook, save/load any "extra"
 * state around the low-level switch, and set the incoming task's dfh
 * (disabled-fph) bit unless it already owns the local FPU, so a later
 * fph access faults and lazily restores the state.
 */
#define __switch_to(prev,next,last) do {							 \
	IA64_ACCOUNT_ON_SWITCH(prev, next);							 \
	if (IA64_HAS_EXTRA_STATE(prev))								 \
		ia64_save_extra(prev);								 \
	if (IA64_HAS_EXTRA_STATE(next))								 \
		ia64_load_extra(next);								 \
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
	(last) = ia64_switch_to((next));							 \
} while (0)
60
#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {				\
		ia64_psr(task_pt_regs(prev))->mfh = 0;			\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
	/* "next" in old context is "current" in new context */			\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	       \
		     (task_cpu(current) !=				       \
		      		      task_thread_info(current)->last_cpu))) { \
		platform_migrate(current);				       \
		task_thread_info(current)->last_cpu = task_cpu(current);       \
	}								       \
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif
86
87#endif /* _ASM_IA64_SWITCH_TO_H */