/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <asm/cmpxchg.h>
#include <asm/loongarch.h>

/*
 * The "address" (in fact, offset from $r21) of a per-CPU variable is close to
 * the loading address of main kernel image, but far from where the modules are
 * loaded. Tell the compiler this fact when using explicit relocs.
 */
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
# if __has_attribute(model)
#  define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
# else
#  error compiler support for the model attribute is necessary when a recent assembler is used
# endif
#endif

/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");

static inline void set_my_cpu_offset(unsigned long off)
{
	__my_cpu_offset = off;
	csr_write64(off, PERCPU_BASE_KS);
}

#define __my_cpu_offset						\
({								\
	__asm__ __volatile__("":"+r"(__my_cpu_offset));		\
	__my_cpu_offset;					\
})

#define PERCPU_OP(op, asm_op, c_op)					\
static __always_inline unsigned long __percpu_##op(void *ptr,		\
			unsigned long val, int size)			\
{									\
	unsigned long ret;						\
									\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__(					\
		"am"#asm_op".w"	" %[ret], %[val], %[ptr]	\n"	\
		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr)		\
		: [val] "r" (val));					\
		break;							\
	case 8:								\
		__asm__ __volatile__(					\
		"am"#asm_op".d"	" %[ret], %[val], %[ptr]	\n"	\
		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr)		\
		: [val] "r" (val));					\
		break;							\
	default:							\
		ret = 0;						\
		BUILD_BUG();						\
	}								\
									\
	return ret c_op val;						\
}

PERCPU_OP(add, add, +)
PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP

static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		__asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr]	\n"
		: [ret] "=&r"(ret)
		: [ptr] "r"(ptr)
		: "memory");
		break;
	case 2:
		__asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr]	\n"
		: [ret] "=&r"(ret)
		: [ptr] "r"(ptr)
		: "memory");
		break;
	case 4:
		__asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr]	\n"
		: [ret] "=&r"(ret)
		: [ptr] "r"(ptr)
		: "memory");
		break;
	case 8:
		__asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr]	\n"
		: [ret] "=&r"(ret)
		: [ptr] "r"(ptr)
		: "memory");
		break;
	default:
		ret = 0;
		BUILD_BUG();
	}

	return ret;
}

static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("stx.b %[val], $r21, %[ptr]	\n"
		:
		: [val] "r" (val), [ptr] "r" (ptr)
		: "memory");
		break;
	case 2:
		__asm__ __volatile__("stx.h %[val], $r21, %[ptr]	\n"
		:
		: [val] "r" (val), [ptr] "r" (ptr)
		: "memory");
		break;
	case 4:
		__asm__ __volatile__("stx.w %[val], $r21, %[ptr]	\n"
		:
		: [val] "r" (val), [ptr] "r" (ptr)
		: "memory");
		break;
	case 8:
		__asm__ __volatile__("stx.d %[val], $r21, %[ptr]	\n"
		:
		: [val] "r" (val), [ptr] "r" (ptr)
		: "memory");
		break;
	default:
		BUILD_BUG();
	}
}

static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small((volatile void *)ptr, val, size);

	case 4:
		return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);

	case 8:
		return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);

	default:
		BUILD_BUG();
	}

	return 0;
}

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable_notrace();				\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable_notrace();				\
	__ret;							\
})

#define _percpu_read(pcp)						\
({									\
	typeof(pcp) __retval;						\
	__retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp));	\
	__retval;							\
})

#define _percpu_write(pcp, val)						\
do {									\
	__percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp));	\
} while (0)								\

#define _pcp_protect(operation, pcp, val)			\
({								\
	typeof(pcp) __retval;					\
	preempt_disable_notrace();				\
	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
					  (val), sizeof(pcp));	\
	preempt_enable_notrace();				\
	__retval;						\
})

#define _percpu_add(pcp, val) \
	_pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
	_pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
	_pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))

#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */
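
/*
 * Minimal usage sketch: the size-suffixed this_cpu_*_N() macros above are
 * not called directly; the generic this_cpu_*() wrappers from
 * <linux/percpu-defs.h> dispatch to them based on sizeof() of the per-CPU
 * variable. The variable and function names below are hypothetical and only
 * illustrate that dispatch, assuming an 8-byte per-CPU counter:
 *
 *	DEFINE_PER_CPU(unsigned long, hypothetical_counter);
 *
 *	static void hypothetical_update(void)
 *	{
 *		// For an 8-byte variable this selects this_cpu_add_8(),
 *		// i.e. an amadd.d AMO on an address offset from $r21.
 *		this_cpu_add(hypothetical_counter, 1);
 *
 *		// Selects this_cpu_read_8(): a single ldx.d relative to $r21.
 *		unsigned long v = this_cpu_read(hypothetical_counter);
 *		(void)v;
 *	}
 */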