v6.9.4
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

/*
 * The PowerPC branch instruction is:
 *
 *  0         6                 30   31
 *  +---------+----------------+---+---+
 *  | opcode  |     LI         |AA |LK |
 *  +---------+----------------+---+---+
 *  Where AA = 0 and LK = 0
 *
 * LI is a signed 24-bit integer. The real branch offset is computed
 * by: imm32 = SignExtend(LI:'0b00', 32);
 *
 * So the maximum forward branch should be:
 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
 * The maximum backward branch should be:
 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
 */
static inline bool is_offset_in_branch_range(long offset)
{
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

static inline bool is_offset_in_cond_branch_range(long offset)
{
	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

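As a quick illustration (an editorial sketch, not part of the kernel header), the limits described in the comment above are exactly what these helpers accept:

static inline void branch_range_examples(void)	/* illustrative only */
{
	is_offset_in_branch_range(0x1fffffc);	/* true:  maximum forward offset  */
	is_offset_in_branch_range(-0x2000000);	/* true:  maximum backward offset */
	is_offset_in_branch_range(0x2000000);	/* false: one word out of range   */
	is_offset_in_branch_range(6);		/* false: not word-aligned        */
}
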
static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
				unsigned long target, int flags)
{
	long offset;

	*instr = ppc_inst(0);
	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

	return 0;
}

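A minimal usage sketch (an editorial addition, not from the kernel source): build a relative "bl" to a target and write it out with patch_instruction(), bailing out if the target is beyond direct-branch range.

static inline int patch_bl_example(u32 *site, unsigned long target)
{
	ppc_inst_t insn;

	/* create_branch() returns non-zero when the target is out of range */
	if (create_branch(&insn, site, target, BRANCH_SET_LINK))
		return 1;

	return patch_instruction(site, insn);	/* writes "bl target" at site */
}
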
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
	return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((u32 *)patch_site_addr(site), target, flags);
}

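To make the patch-site mechanism concrete (editorial sketch, addresses hypothetical): a patch site is an s32 holding the distance from itself to the instruction it describes, and patch_site_addr() simply adds that stored offset back onto the site's own address.

static inline int patch_site_nop_example(s32 *site)
{
	/*
	 * If 'site' lives at 0xc000000000000100 and stores 0x24, then
	 * patch_site_addr(site) == 0xc000000000000124, and that word is
	 * the instruction overwritten (here with a nop) by the call below.
	 */
	return patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
}
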
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}

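One way these helpers are used (editorial sketch): clear or set individual bits in an instruction already present in the text, for example setting the LK bit to turn a relative "b" into a "bl" without touching its offset.

static inline int set_link_bit_example(unsigned int *addr)
{
	/* assumes *addr already holds a relative branch ("b target") */
	return modify_instruction(addr, 0, BRANCH_SET_LINK);
}
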
static inline unsigned int branch_opcode(ppc_inst_t instr)
{
	return ppc_inst_primary_opcode(instr) & 0x3F;
}

static inline int instr_is_branch_iform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 18;
}

static inline int instr_is_branch_bform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 16;
}

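For example (editorial sketch): an unconditional relative branch such as "b .+8" is I-form (primary opcode 18), while a conditional "bc" is B-form (primary opcode 16), so only the first check below holds for it.

static inline void branch_form_example(void)
{
	ppc_inst_t insn = ppc_inst(0x48000008);	/* "b .+8" */

	instr_is_branch_iform(insn);	/* 1: primary opcode 18 */
	instr_is_branch_bform(insn);	/* 0: "bc" uses primary opcode 16 */
}
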
int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))


static inline unsigned long ppc_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((struct func_desc *)func)->addr;
#else
	return (unsigned long)func;
#endif
}

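A short sketch of what this buys you (editorial addition; the function name below is hypothetical): on ELFv2 a function that has a global entry point resolves to an address 8 bytes past its symbol, otherwise the two addresses coincide.

void some_patched_func(void);				/* hypothetical symbol */

static inline unsigned long local_entry_example(void)
{
	/*
	 * On ELFv2 this skips the addis/addi (or lis/addi) TOC setup when
	 * present, i.e. returns the symbol address + 8; on ELFv1 it reads
	 * the text address out of the function descriptor instead.
	 */
	return ppc_function_entry((void *)some_patched_func);
}
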
static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	/* On PPC64 ABIv2 the global entry point is at the address itself */
	return (unsigned long)func;
#else
	/* In all other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(CONFIG_PPC64_ELF_ABI_V2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}

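Typical use (editorial sketch; a well-known kernel symbol is used purely as an illustration):

static inline unsigned long lookup_schedule_entry_example(void)
{
	/*
	 * Resolves ".schedule" on ABIv1, the local entry point of
	 * schedule() on ABIv2, and returns 0 if the symbol is unknown.
	 */
	return ppc_kallsyms_lookup_name("schedule");
}
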
 
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET         24
#else
#define R2_STACK_OFFSET         40
#endif

#define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)

#endif /* _ASM_POWERPC_CODE_PATCHING_H */
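
As an illustration of how these encodings tend to be used (editorial sketch, not taken from the kernel source): a patcher can check that the word following a call site really is the ELFv2 TOC restore, ld r2,24(r1), before rewriting the call.

static inline bool follows_call_with_toc_restore(const u32 *after_call)
{
	ppc_inst_t op = ppc_inst_read(after_call);

	return ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC));
}
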
v5.4
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
unsigned int create_branch(const unsigned int *addr,
			   unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
				unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, unsigned int instr)
{
	return patch_instruction((unsigned int *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, (*addr & ~clr) | set);
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}

int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
			      const unsigned int *src);
extern bool is_conditional_branch(unsigned int instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif
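
Usage sketch (editorial addition; the handler symbol is hypothetical): on 64-bit Book3E this redirects a fixed exception vector, here the 0x1c0 data TLB miss, to an alternate handler.

static inline void patch_exception_example(void)
{
	/* 'alt_data_tlb_miss_handler' is a made-up label in exception text */
	patch_exception(0x1c0, alt_data_tlb_miss_handler);
}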

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c020000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 the global entry point is at the address */
	return (unsigned long)func;
#else
	/* All other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup() to return function entry address:
 * - For ABIv1, we lookup the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET         24
#else
#define R2_STACK_OFFSET         40
#endif

#define PPC_INST_LD_TOC		(PPC_INST_LD  | ___PPC_RT(__REG_R2) | \
				 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
				 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */