/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

7#ifndef __ASM_ALPHA_PROCESSOR_H
8#define __ASM_ALPHA_PROCESSOR_H
9
10#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
11
12/*
13 * Returns current instruction pointer ("program counter").
14 */
15#define current_text_addr() \
16 ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/*
 * Top of the user stack: 2GB for ADDR_LIMIT_32BIT personalities,
 * 0x00120000000 (4.5GB) for everyone else.
 */
#define STACK_TOP \
  (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)

#define STACK_TOP_MAX	0x00120000000UL

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's: 1GB for 32-bit personalities, TASK_SIZE / 2
 * otherwise.
 */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)

/* Address-space segment cookie (get_fs()/set_fs() style state). */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* This is dead.  Everything has been moved to thread_info. */
struct thread_struct { };
#define INIT_THREAD  { }

/* Return saved PC of a blocked thread. */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);

/* Do necessary setup to start up a newly executed thread. */
struct pt_regs;	/* forward declaration, as in later revisions of this header */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)

/* Create a kernel thread without removing it from tasklists. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

unsigned long get_wchan(struct task_struct *p);

/* PC of a stopped task, taken from its saved register frame. */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)

/* User SP: read the live register for current, the saved PCB otherwise. */
#define KSTK_ESP(tsk) \
  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)

#define cpu_relax()	barrier()

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock)	do { } while (0)
#endif

/* Hint to pull the cache line at @ptr in for reading (high locality). */
extern inline void prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 0, 3);
}

/* Hint to pull the cache line at @ptr in with intent to write. */
extern inline void prefetchw(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}
85
86#ifdef CONFIG_SMP
87extern inline void spin_lock_prefetch(const void *ptr)
88{
89 __builtin_prefetch(ptr, 1, 3);
90}
91#endif
92
93#endif /* __ASM_ALPHA_PROCESSOR_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

8#ifndef __ASM_ALPHA_PROCESSOR_H
9#define __ASM_ALPHA_PROCESSOR_H
10
11#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
12
13/*
14 * Returns current instruction pointer ("program counter").
15 */
16#define current_text_addr() \
17 ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/*
 * Top of the user stack: 2GB for ADDR_LIMIT_32BIT personalities,
 * 0x00120000000 (4.5GB) for everyone else.
 */
#define STACK_TOP \
  (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)

#define STACK_TOP_MAX	0x00120000000UL

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's: 1GB for 32-bit personalities, TASK_SIZE / 2
 * otherwise.
 */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)

/* Address-space segment cookie (get_fs()/set_fs() style state). */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* This is dead.  Everything has been moved to thread_info. */
struct thread_struct { };
#define INIT_THREAD  { }

/* Do necessary setup to start up a newly executed thread. */
struct pt_regs;
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/* Free all resources held by a thread. */
struct task_struct;
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/* PC of a stopped task, taken from its saved register frame. */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)

/* User SP: read the live register for current, the saved PCB otherwise. */
#define KSTK_ESP(tsk) \
  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)

#define cpu_relax()	barrier()

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock)	do { } while (0)
#endif

/* Hint to pull the cache line at @ptr in for reading (high locality). */
extern inline void prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 0, 3);
}

/* Hint to pull the cache line at @ptr in with intent to write. */
extern inline void prefetchw(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}
78
79#ifdef CONFIG_SMP
80extern inline void spin_lock_prefetch(const void *ptr)
81{
82 __builtin_prefetch(ptr, 1, 3);
83}
84#endif
85
86#endif /* __ASM_ALPHA_PROCESSOR_H */