/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
#include <asm/vdso/processor.h>

#ifdef __KERNEL__
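/*
 * The top of the user stack depends on the personality: tasks with
 * ADDR_LIMIT_32BIT use the full 32-bit TASK_SIZE, while legacy 26-bit
 * binaries are limited to TASK_SIZE_26.
 */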
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
							/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
							/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  {	}

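/*
 * start_thread() sets up the user register state when a new image is
 * exec'd: the user registers are cleared, except that FDPIC binaries keep
 * r7-r9 and get the start of the data segment in r10 (as do !MMU
 * binaries), the CPSR is set to 32-bit or 26-bit user mode according to
 * the personality, the Thumb bit is set for a Thumb entry point, the
 * configured endianness is applied, and pc and sp are loaded last.
 */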
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

unsigned long __get_wchan(struct task_struct *p);

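/*
 * The saved user-mode registers of a task live in a struct pt_regs at the
 * top of its kernel stack; KSTK_EIP()/KSTK_ESP() read the user pc and sp
 * from there.
 */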
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

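/*
 * __ALT_SMP_ASM() emits the SMP form of an instruction and records its
 * location, together with the UP replacement, in the .alt.smp.init
 * section so the boot code can patch in the UP form when the kernel runs
 * on a uniprocessor system.  On UP-only kernels the UP form is used
 * directly.
 */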
#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.align	2\n"						\
	"	.long	9998b - .\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - ARMv5 and later only.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

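/*
 * PLDW (preload for write) is part of the ARMv7 multiprocessing
 * extensions, so prefetchw() is only provided on v7 SMP builds and falls
 * back to a plain PLD when the kernel is patched for uniprocessor
 * operation at boot.
 */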
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#endif

#endif /* __ASM_ARM_PROCESSOR_H */