/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 */

#ifndef _ASM_POWERPC_DCR_NATIVE_H
#define _ASM_POWERPC_DCR_NATIVE_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
#include <linux/stringify.h>

typedef struct {
        unsigned int base;
} dcr_host_native_t;

static inline bool dcr_map_ok_native(dcr_host_native_t host)
{
        return true;
}

#define dcr_map_native(dev, dcr_n, dcr_c) \
        ((dcr_host_native_t){ .base = (dcr_n) })
#define dcr_unmap_native(host, dcr_c)           do {} while (0)
#define dcr_read_native(host, dcr_n)            mfdcr(dcr_n + host.base)
#define dcr_write_native(host, dcr_n, value)    mtdcr(dcr_n + host.base, value)

/* Table based DCR accessors */
extern void __mtdcr(unsigned int reg, unsigned int val);
extern unsigned int __mfdcr(unsigned int reg);

/* mfdcrx/mtdcrx instruction based accessors. We hand code
 * the opcodes in order not to depend on newer binutils */
static inline unsigned int mfdcrx(unsigned int reg)
{
        unsigned int ret;
        asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
                     : "=r" (ret) : "r" (reg));
        return ret;
}

static inline void mtdcrx(unsigned int reg, unsigned int val)
{
        asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
                     : : "r" (val), "r" (reg));
}

/*
 * A compile-time constant DCR number below 1024 fits the 10-bit DCR
 * field of the immediate-form instruction and is emitted directly;
 * otherwise use the indexed mfdcrx/mtdcrx when the CPU supports them,
 * falling back to the table based __mfdcr/__mtdcr helpers.
 */
#define mfdcr(rn)                                               \
        ({unsigned int rval;                                    \
        if (__builtin_constant_p(rn) && rn < 1024)              \
                asm volatile("mfdcr %0, %1" : "=r" (rval)       \
                              : "n" (rn));                      \
        else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))  \
                rval = mfdcrx(rn);                              \
        else                                                    \
                rval = __mfdcr(rn);                             \
        rval;})

#define mtdcr(rn, v)                                            \
do {                                                            \
        if (__builtin_constant_p(rn) && rn < 1024)              \
                asm volatile("mtdcr %0, %1"                     \
                              : : "n" (rn), "r" (v));           \
        else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))  \
                mtdcrx(rn, v);                                  \
        else                                                    \
                __mtdcr(rn, v);                                 \
} while (0)

/*
 * R/W of indirect DCRs make use of standard naming conventions for DCRs.
 * The target register number is written to the CONFIG_ADDR DCR and the
 * value then moves through the CONFIG_DATA DCR; dcr_ind_lock keeps the
 * address/data sequence atomic against other users of the pair.
 */
extern spinlock_t dcr_ind_lock;

static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
{
        unsigned long flags;
        unsigned int val;

        spin_lock_irqsave(&dcr_ind_lock, flags);
        if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
                mtdcrx(base_addr, reg);
                val = mfdcrx(base_data);
        } else {
                __mtdcr(base_addr, reg);
                val = __mfdcr(base_data);
        }
        spin_unlock_irqrestore(&dcr_ind_lock, flags);
        return val;
}

static inline void __mtdcri(int base_addr, int base_data, int reg,
                            unsigned val)
{
        unsigned long flags;

        spin_lock_irqsave(&dcr_ind_lock, flags);
        if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
                mtdcrx(base_addr, reg);
                mtdcrx(base_data, val);
        } else {
                __mtdcr(base_addr, reg);
                __mtdcr(base_data, val);
        }
        spin_unlock_irqrestore(&dcr_ind_lock, flags);
}

static inline void __dcri_clrset(int base_addr, int base_data, int reg,
                                 unsigned clr, unsigned set)
{
        unsigned long flags;
        unsigned int val;

        spin_lock_irqsave(&dcr_ind_lock, flags);
        if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
                mtdcrx(base_addr, reg);
                val = (mfdcrx(base_data) & ~clr) | set;
                mtdcrx(base_data, val);
        } else {
                __mtdcr(base_addr, reg);
                val = (__mfdcr(base_data) & ~clr) | set;
                __mtdcr(base_data, val);
        }
        spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
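/*
 * The wrappers below paste the peripheral name into the conventional
 * DCRN_<base>_CONFIG_ADDR / DCRN_<base>_CONFIG_DATA pair.  A minimal
 * sketch of the expansion, assuming a hypothetical peripheral FOO with
 * made-up DCR numbers (neither is defined by this header):
 *
 *      #define DCRN_FOO_CONFIG_ADDR    0x00e   (hypothetical)
 *      #define DCRN_FOO_CONFIG_DATA    0x00f   (hypothetical)
 *
 *      mfdcri(FOO, reg)            ->  __mfdcri(0x00e, 0x00f, reg)
 *      mtdcri(FOO, reg, data)      ->  __mtdcri(0x00e, 0x00f, reg, data)
 *      dcri_clrset(FOO, reg, c, s) ->  __dcri_clrset(0x00e, 0x00f, reg, c, s)
 */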
#define mfdcri(base, reg)       __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
                                         DCRN_ ## base ## _CONFIG_DATA, \
                                         reg)

#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
                                         DCRN_ ## base ## _CONFIG_DATA, \
                                         reg, data)

#define dcri_clrset(base, reg, clr, set)        __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR,    \
                                                              DCRN_ ## base ## _CONFIG_DATA,    \
                                                              reg, clr, set)

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_NATIVE_H */
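/*
 * Usage sketch of the native host API above; the device pointer and DCR
 * numbers are illustrative placeholders, not values defined by this header:
 *
 *      dcr_host_native_t host = dcr_map_native(dev, 0x80, 0x10);
 *      unsigned int v;
 *
 *      if (!dcr_map_ok_native(host))
 *              return;                         (always succeeds natively)
 *      v = dcr_read_native(host, 0);           (reads DCR 0x80 + 0)
 *      dcr_write_native(host, 0, v | 1);       (writes it back, bit 31 set)
 *      dcr_unmap_native(host, 0x10);           (no-op on native DCR)
 */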