/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2

/*
 * The powerpc branch instruction is:
 *
 *    0        6                30  31
 *   +---------+----------------+---+---+
 *   | opcode  |       LI       |AA |LK |
 *   +---------+----------------+---+---+
 *   where AA = 0 and LK = 0.
 *
 * LI is a signed 24-bit integer. The real branch offset is computed
 * by: imm32 = SignExtend(LI:'0b00', 32);
 *
 * So the maximum forward branch is:
 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
 * and the maximum backward branch is:
 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
 */
static inline bool is_offset_in_branch_range(long offset)
{
        return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

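/*
 * Conditional (B-form) branches carry only a 14-bit displacement (BD),
 * again word-aligned and sign-extended, so their reach is +/- 32KB.
 */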
static inline bool is_offset_in_cond_branch_range(long offset)
{
        return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
                                unsigned long target, int flags)
{
        long offset;

        *instr = ppc_inst(0);
        offset = target;
        if (!(flags & BRANCH_ABSOLUTE))
                offset = offset - (unsigned long)addr;

        /* Check we can represent the target in the instruction format */
        if (!is_offset_in_branch_range(offset))
                return 1;

        /* Mask out the flags and target, so they don't step on each other. */
        *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

        return 0;
}
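
/*
 * For illustration only: creating and patching a relative "bl" to
 * 'target' over the instruction at 'addr' might look like:
 *
 *      ppc_inst_t instr;
 *
 *      if (!create_branch(&instr, addr, (unsigned long)target, BRANCH_SET_LINK))
 *              patch_instruction(addr, instr);
 */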

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
                       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);

static inline unsigned long patch_site_addr(s32 *site)
{
        return (unsigned long)site + *site;
}
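
/*
 * A patch site is a self-relative s32: the word at 'site' holds the
 * offset from the site to the instruction to be patched. For example
 * (addresses made up), if the site word lives at 0xc000000000010000
 * and contains 0x100, patch_site_addr() returns 0xc000000000010100.
 */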

static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
        return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
        return patch_branch((u32 *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
                                     unsigned int set)
{
        return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
        return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
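
/*
 * For illustration: modify_instruction() reads the current instruction,
 * clears the bits in 'clr', sets the bits in 'set' and patches the
 * result back. E.g. a hypothetical call such as
 *
 *      modify_instruction(addr, 0xffff, 0x1234);
 *
 * would rewrite the low halfword of the instruction at 'addr' to 0x1234.
 */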

static inline unsigned int branch_opcode(ppc_inst_t instr)
{
        return ppc_inst_primary_opcode(instr) & 0x3F;
}
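
/*
 * Branch classification: primary opcode 18 is the I-form (unconditional)
 * branch (b/ba/bl/bla), primary opcode 16 is the B-form (conditional)
 * branch (bc and friends).
 */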

static inline int instr_is_branch_iform(ppc_inst_t instr)
{
        return branch_opcode(instr) == 18;
}

static inline int instr_is_branch_bform(ppc_inst_t instr)
{
        return branch_opcode(instr) == 16;
}

int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);

#define OP_RT_RA_MASK   0xffff0000UL
#define LIS_R2          (PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12    (PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2      (PPC_RAW_ADDI(_R2, _R2, 0))

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
        u32 *insn = func;

        /*
         * A PPC64 ABIv2 function may have a local and a global entry
         * point. We need to use the local entry point when patching
         * functions, so identify and step over the global entry point
         * sequence.
         *
         * The global entry point sequence is always of the form:
         *
         *      addis r2,r12,XXXX
         *      addi  r2,r2,XXXX
         *
         * A linker optimisation may convert the addis to lis:
         *
         *      lis  r2,XXXX
         *      addi r2,r2,XXXX
         */
        if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
             ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
            ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
                return (unsigned long)(insn + 2);
        else
                return (unsigned long)func;
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
        /*
         * On PPC64 ABIv1 the function pointer actually points to the
         * function's descriptor. The first entry in the descriptor is
         * the address of the function text.
         */
        return ((struct func_desc *)func)->addr;
#else
        return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
        /* On PPC64 ABIv2 the global entry point is the function's address */
        return (unsigned long)func;
#else
        /* In all other cases there is no difference vs ppc_function_entry() */
        return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry
 * address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
        unsigned long addr;
#ifdef CONFIG_PPC64_ELF_ABI_V1
        /* check for dot variant */
        char dot_name[1 + KSYM_NAME_LEN];
        bool dot_appended = false;

        if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
                return 0;

        if (name[0] != '.') {
                dot_name[0] = '.';
                dot_name[1] = '\0';
                strlcat(dot_name, name, sizeof(dot_name));
                dot_appended = true;
        } else {
                dot_name[0] = '\0';
                strlcat(dot_name, name, sizeof(dot_name));
        }
        addr = kallsyms_lookup_name(dot_name);
        if (!addr && dot_appended)
                /* Let's try the original non-dot symbol lookup */
                addr = kallsyms_lookup_name(name);
#elif defined(CONFIG_PPC64_ELF_ABI_V2)
        addr = kallsyms_lookup_name(name);
        if (addr)
                addr = ppc_function_entry((void *)addr);
#else
        addr = kallsyms_lookup_name(name);
#endif
        return addr;
}
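
/*
 * For illustration: on ABIv1, ppc_kallsyms_lookup_name("foo") first tries
 * the ".foo" text symbol and only falls back to "foo" (the descriptor)
 * if that fails; on ABIv2 it resolves "foo" and then steps past any
 * global entry point sequence via ppc_function_entry().
 */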

/*
 * Some instruction encodings commonly used in dynamic ftrace
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
#endif

#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
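
/*
 * For illustration (the exact sequence depends on the ABI and compiler
 * flags), a profiled function entry that these encodings are matched
 * against looks roughly like:
 *
 *      mflr    r0
 *      std     r0, PPC_LR_STKOFF(r1)   <- PPC_INST_STD_LR
 *      bl      _mcount
 *
 * and PPC_INST_LD_TOC matches the TOC restore emitted after a call:
 *
 *      ld      r2, R2_STACK_OFFSET(r1)
 */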

#endif /* _ASM_POWERPC_CODE_PATCHING_H */