/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>

#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64     (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
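/*
 * As the expression above reads: cpu_vabits is the virtual-address
 * width implemented by the CPU, clamped to VA_BITS (the width the
 * configured page-table layout covers), so the 64-bit user limit
 * never exceeds either.
 */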

#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

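/*
 * Window used when randomizing the vDSO base (presumably by the vDSO
 * mapping code): 1 MiB for 32-bit address spaces, 64 MiB otherwise.
 */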
#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)

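/*
 * Per-register save area for the FPU/vector unit. The 256-bit width and
 * 32-byte alignment are apparently sized for the widest (LASX) vector
 * registers; union fpureg lets the same storage be viewed as 32-bit or
 * 64-bit lanes through the accessors generated below.
 */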
#define FPU_REG_WIDTH		256
#define FPU_ALIGN		__attribute__((aligned(32)))

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned int idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
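/*
 * The two expansions above provide get_fpr32()/set_fpr32() and
 * get_fpr64()/set_fpr64(). Illustrative use (hypothetical, not from
 * this file), treating one register as 64-bit lanes:
 *
 *	union fpureg reg = { .val64 = { 0 } };
 *
 *	set_fpr64(&reg, 0, 0x0123456789abcdefULL);
 *	WARN_ON(get_fpr64(&reg, 0) != 0x0123456789abcdefULL);
 */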

struct loongarch_fpu {
	unsigned int	fcsr;
	uint64_t	fcc;	/* 8x8 */
	union fpureg	fpr[NUM_FPU_REGS];
};
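/*
 * fcc packs the eight condition-flag registers ($fcc0-$fcc7) into one
 * 64-bit word, one byte per flag (the "8x8" note above). Assuming
 * $fcc0 sits in the least significant byte, flag n of a saved context
 * could be read as:
 *
 *	bool cc = (fpu->fcc >> (8 * n)) & 0x1;
 */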

#define INIT_CPUMASK { \
	{0,} \
}

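/*
 * Minimum alignment for task_struct allocations; 32 bytes presumably so
 * that the FPU_ALIGN'ed save area embedded via thread_struct keeps its
 * alignment wherever the task_struct slab places it.
 */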
#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22; /* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */

	/* __schedule() return address / call frame address */
	unsigned long sched_ra;
	unsigned long sched_cfa;

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;

	/* Eflags register */
	unsigned long eflags;

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers, must be last because
	 * they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
};

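/*
 * Where a descheduled task will resume: the return address and call
 * frame address recorded in sched_ra/sched_cfa by the context-switch
 * path, used e.g. when unwinding a sleeping task's stack.
 */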
#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

#define INIT_THREAD  {						\
	/*							\
	 * Main processor registers				\
	 */							\
	.reg01			= 0,				\
	.reg03			= 0,				\
	.reg22			= 0,				\
	.reg23			= 0,				\
	.reg24			= 0,				\
	.reg25			= 0,				\
	.reg26			= 0,				\
	.reg27			= 0,				\
	.reg28			= 0,				\
	.reg29			= 0,				\
	.reg30			= 0,				\
	.reg31			= 0,				\
	.sched_ra		= 0,				\
	.sched_cfa		= 0,				\
	.csr_crmd		= 0,				\
	.csr_prmd		= 0,				\
	.csr_euen		= 0,				\
	.csr_ecfg		= 0,				\
	.csr_badvaddr		= 0,				\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.trap_nr		= 0,				\
	.error_code		= 0,				\
	/*							\
	 * FPU & vector registers				\
	 */							\
	.fpu			= {				\
		.fcsr		= 0,				\
		.fcc		= 0,				\
		.fpr		= {{{0,},},},			\
	},							\
}

struct task_struct;

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long		boot_option_idle_override;
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

static inline void flush_thread(void)
{
}

unsigned long __get_wchan(struct task_struct *p);

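/*
 * task_pt_regs() locates the user-mode register frame saved at the top
 * of a task's kernel stack; the KSTK_* helpers read individual fields
 * from it (csr_era is the PC, regs[3] is $sp). Illustrative use on a
 * stopped task, e.g. as done generically for /proc/<pid>/stat:
 *
 *	unsigned long pc = KSTK_EIP(tsk);
 *	unsigned long sp = KSTK_ESP(tsk);
 */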
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)

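/*
 * return_address(): the empty asm clobbers "$1" ($ra), which appears
 * intended to keep the compiler from holding the return address only in
 * that register, so __builtin_return_address(0) stays reliable.
 */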
#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})

#ifdef CONFIG_CPU_HAS_PREFETCH

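/*
 * __builtin_prefetch(addr, rw, locality): rw is 0 for a read hint and 1
 * for a write hint; locality ranges from 0 (no temporal reuse expected)
 * to 3 (high reuse). Both hints below use a low locality of 1.
 */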
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif

#endif /* _ASM_PROCESSOR_H */