// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *		  CS GROUP France
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include "bpf_jit.h"
/*
* Stack layout:
*
* [ prev sp ] <-------------
* [ nv gpr save area ] 16 * 4 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
* [ frame header ] 16 |
* sp (r1) ---> [ stack pointer ] --------------
*/
/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
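
/*
 * Worked example (illustrative; STACK_FRAME_MIN_SIZE is the 16-byte frame
 * header shown in the layout above): with the maximum 512-byte bpf stack,
 * BPF_PPC_STACKFRAME = 16 + (15 * 4 + 4) + 512 = 592, which is a multiple
 * of 16, so the quadword alignment requirement holds.
 */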
#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
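
/*
 * PPC_EX32() emits the sign extension of a 32-bit immediate: it loads -1
 * if the immediate is negative and 0 otherwise, which is used to set the
 * register holding the high word of a 64-bit BPF register pair.
 */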
/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN _R17
#define BPF_PPC_TC _R16
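
/*
 * Note: BPF_PPC_TC is deliberately below BPF_PPC_NVR_MIN. The tail call
 * counter does not live in r16 itself; naming it r16 makes
 * bpf_jit_stack_offsetof() below resolve it to the stack slot just under
 * the r17-r31 save slots, i.e. the 16th word of the 16 * 4 save area.
 */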
/* BPF register usage */
#define TMP_REG (MAX_BPF_JIT_REG + 0)
/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R12;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R4;
	ctx->b2p[BPF_REG_2] = _R6;
	ctx->b2p[BPF_REG_3] = _R8;
	ctx->b2p[BPF_REG_4] = _R10;
	ctx->b2p[BPF_REG_5] = _R22;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R24;
	ctx->b2p[BPF_REG_7] = _R26;
	ctx->b2p[BPF_REG_8] = _R28;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R18;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R20;
	ctx->b2p[TMP_REG] = _R31;	/* 32 bits */
}
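
/*
 * Each 64-bit BPF register is backed by a pair of GPRs: the register
 * recorded in b2p[] holds the low 32 bits and the register just below it
 * (b2p[reg] - 1) holds the high 32 bits, mirroring the way the PPC32 ABI
 * passes 64-bit values in pairs such as r3:r4. TMP_REG is the exception:
 * it is a single 32-bit scratch register.
 */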
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}
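
/*
 * For example: r31 is at BPF_PPC_STACKFRAME - 4 (the top word of the save
 * area), r17 at BPF_PPC_STACKFRAME - 60, and the tail call counter slot
 * (BPF_PPC_TC, r16) at BPF_PPC_STACKFRAME - 64, the bottom of the area.
 */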
#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
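
/*
 * In these masks, GPR rN is tracked by bit (31 - N) of ctx->seen, as set
 * by bpf_set_seen_register(): 0x1ff80000 covers bits 19-28, i.e. r3-r12,
 * and 0x0003ffff covers bits 0-17, i.e. r14-r31.
 */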
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - we use non volatile registers, or
	 * - we use the tail call counter, or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
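
/*
 * When the program makes no helper calls, volatile registers stay live
 * for its whole duration, so BPF registers normally kept in non volatile
 * GPRs can be remapped to unused volatile ones and the prologue/epilogue
 * can skip saving and restoring them. With helper calls, only the
 * temporary registers (BPF_REG_5, BPF_REG_AX, TMP_REG), whose values do
 * not need to survive a call, are candidates for remapping.
 */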
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	unsigned int nvreg_mask;

	if (ctx->seen & SEEN_FUNC)
		nvreg_mask = SEEN_NVREG_TEMP_MASK;
	else
		nvreg_mask = SEEN_NVREG_FULL_MASK;

	while (ctx->seen & nvreg_mask &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
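		/*
		 * Given the seen-bit convention above, masking with
		 * 0xaaaaaaab keeps only the bits of even-numbered NVRs
		 * (each one the low word of a 64-bit pair) plus r31
		 * (TMP_REG); 32 - fls() then yields the lowest-numbered
		 * such register still in use as the remap candidate.
		 */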