// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/debug-monitors.h>
#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
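
/*
 * Frequently referenced bits in an A64 instruction word: sf (bit 31)
 * selects the 64-bit form of an operation, N (bit 22) is part of logical
 * (bitmask) immediate encodings, and bit 22 also carries the "LSL #12"
 * shift flag of add/sub immediates.
 */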
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
#define AARCH64_INSN_LSL_12 BIT(22)
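
/*
 * Return the mask and shift (within the instruction word) of a plain
 * contiguous immediate field of the given type, or -EINVAL if the type
 * is not recognised.
 */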
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
u32 mask;
int shift;
switch (type) {
case AARCH64_INSN_IMM_26:
mask = BIT(26) - 1;
shift = 0;
break;
case AARCH64_INSN_IMM_19:
mask = BIT(19) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_16:
mask = BIT(16) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_14:
mask = BIT(14) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_12:
mask = BIT(12) - 1;
shift = 10;
break;
case AARCH64_INSN_IMM_9:
mask = BIT(9) - 1;
shift = 12;
break;
case AARCH64_INSN_IMM_7:
mask = BIT(7) - 1;
shift = 15;
break;
case AARCH64_INSN_IMM_6:
case AARCH64_INSN_IMM_S:
mask = BIT(6) - 1;
shift = 10;
break;
case AARCH64_INSN_IMM_R:
mask = BIT(6) - 1;
shift = 16;
break;
case AARCH64_INSN_IMM_N:
mask = 1;
shift = 22;
break;
default:
return -EINVAL;
}
*maskp = mask;
*shiftp = shift;
return 0;
}
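
/*
 * ADR-form immediates (AARCH64_INSN_IMM_ADR) are split in two: the low
 * ADR_IMM_HILOSPLIT bits (immlo) sit at bit 29 and the remaining high
 * bits (immhi) at bit 5, giving a 21-bit immediate in total.
 */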
#define ADR_IMM_HILOSPLIT 2
#define ADR_IMM_SIZE SZ_2M
#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT 29
#define ADR_IMM_HISHIFT 5
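
/*
 * Extract the immediate field of the given type from @insn. ADR-form
 * immediates are reassembled from their immhi:immlo halves; all other
 * types use the shift/mask looked up by aarch64_get_imm_shift_mask().
 */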
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
u32 immlo, immhi, mask;
int shift;
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
mask = ADR_IMM_SIZE - 1;
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("%s: unknown immediate encoding %d\n", __func__,
type);
return 0;
}
}
return (insn >> shift) & mask;
}
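
/*
 * Replace the immediate field of the given type in @insn with @imm.
 * ADR-form immediates are split back into immhi:immlo. Returns
 * AARCH64_BREAK_FAULT for an unknown type, or if @insn is already
 * AARCH64_BREAK_FAULT, so that errors propagate through chained calls.
 */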
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm)
{
u32 immlo, immhi, mask;
int shift;
if (insn == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
imm >>= ADR_IMM_HILOSPLIT;
immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
imm = immlo | immhi;
mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("%s: unknown immediate encoding %d\n", __func__,
type);
return AARCH64_BREAK_FAULT;
}
}
/* Update the immediate field. */
insn &= ~(mask << shift);
insn |= (imm & mask) << shift;
return insn;
}
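
/*
 * Extract a 5-bit register number from @insn: Rt/Rd live at bit 0,
 * Rn at bit 5, Rt2/Ra at bit 10 and Rm at bit 16.
 */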
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
u32 insn)
{
int shift;
switch (type) {
case AARCH64_INSN_REGTYPE_RT:
case AARCH64_INSN_REGTYPE_RD:
shift = 0;
break;
case AARCH64_INSN_REGTYPE_RN:
shift = 5;
break;
case AARCH64_INSN_REGTYPE_RT2:
case AARCH64_INSN_REGTYPE_RA:
shift = 10;
break;
case AARCH64_INSN_REGTYPE_RM:
shift = 16;
break;
default:
pr_err("%s: unknown register type encoding %d\n", __func__,
type);
return 0;
}
return (insn >> shift) & GENMASK(4, 0);
}
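
/*
 * Insert the 5-bit register number @reg into the field selected by @type,
 * rejecting registers outside X0..SP and propagating AARCH64_BREAK_FAULT.
 */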
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
u32 insn,
enum aarch64_insn_register reg)
{
int shift;
if (insn == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
pr_err("%s: unknown register encoding %d\n", __func__, reg);
return AARCH64_BREAK_FAULT;
}
switch (type) {
case AARCH64_INSN_REGTYPE_RT:
case AARCH64_INSN_REGTYPE_RD:
shift = 0;
break;
case AARCH64_INSN_REGTYPE_RN:
shift = 5;
break;
case AARCH64_INSN_REGTYPE_RT2:
case AARCH64_INSN_REGTYPE_RA:
shift = 10;
break;
case AARCH64_INSN_REGTYPE_RM:
case AARCH64_INSN_REGTYPE_RS:
shift = 16;
break;
default:
pr_err("%s: unknown register type encoding %d\n", __func__,
type);
return AARCH64_BREAK_FAULT;
}
insn &= ~(GENMASK(4, 0) << shift);
insn |= reg << shift;
return insn;
}
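
/* Value of the "size" field (bits [31:30]) for each load/store access size. */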
static const u32 aarch64_insn_ldst_size[] = {
[AARCH64_INSN_SIZE_8] = 0,
[AARCH64_INSN_SIZE_16] = 1,
[AARCH64_INSN_SIZE_32] = 2,
[AARCH64_INSN_SIZE_64] = 3,
};
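
/*
 * Set the load/store size field (bits [31:30]) of @insn according to the
 * access size, returning AARCH64_BREAK_FAULT for an unknown size.
 */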
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
u32 insn)
{
u32 size;
if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
pr_err("%s: unknown size encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
size = aarch64_insn_ldst_size[type];
insn &= ~GENMASK(31, 30);
insn |= size << 30;
return insn;
}
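
/*
 * Compute the byte offset from @pc to @addr for a PC-relative immediate,
 * checking that both addresses are word aligned and that the offset fits
 * in [-range, range). Returns @range itself (an invalid offset) on error.
 */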
static inline long label_imm_common(unsigned long pc, unsigned long addr,
long range)
{
long offset;
if ((pc & 0x3) || (addr & 0x3)) {
pr_err("%s: A64 instructions must be word aligned\n", __func__);
return range;
}
offset = ((long)addr - (long)pc);
if (offset < -range || offset >= range) {
pr_err("%s: offset out of range\n", __func__);
return range;
}
return offset;
}
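
/*
 * Generate a B or BL instruction branching from @pc to @addr; the branch
 * target is encoded as a signed 26-bit word offset.
 */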
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type)
{
u32 insn;
long offset;
/*
 * B/BL support a [-128M, 128M) offset.
 * The arm64 virtual address layout guarantees that all kernel and module
 * text is within +/-128M.
*/
offset = label_imm_common(pc, addr, SZ_128M);
if (offset >= SZ_128M)
return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_BRANCH_LINK:
insn = aarch64_insn_get_bl_value();
break;
case AARCH64_INSN_BRANCH_NOLINK:
insn = aarch64_insn_get_b_value();
break;
default:
pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;