Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-15 00:04:15 +08:00)
powerpc/bpf: use bpf_jit_binary_pack_[alloc|finalize|free]
commit 90d862f370 upstream.

Use bpf_jit_binary_pack_alloc in the powerpc JIT. The JIT engine first writes the program to the rw buffer. When the JIT is done, the program is copied to the final location with bpf_jit_binary_pack_finalize. With multiple jit_subprogs, bpf_jit_free is called on some subprograms that have not gone through bpf_jit_binary_pack_finalize() yet. Implement a custom bpf_jit_free(), like commit 1d5f82d9dd ("bpf, x86: fix freeing of not-finalized bpf_prog_pack"), to call bpf_jit_binary_pack_finalize() if necessary. As bpf_flush_icache() is no longer needed, remove it.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231020141358.643575-6-hbathini@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 1033919400
Commit: f99feda568
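For context, the bpf_jit_binary_pack_* API adopted here separates a writable scratch buffer from the read-only region the program finally runs from. The sketch below is a minimal illustration of that call pattern, not the powerpc code in this commit: emit_program() is a hypothetical stand-in for the arch's code-generation passes, and fill_ill_insns() is a simplified fill callback.

/*
 * Minimal sketch of the bpf_jit_binary_pack_* call pattern. Assumptions:
 * NOT the powerpc implementation; emit_program() is hypothetical and
 * fill_ill_insns() is simplified.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/string.h>

static void fill_ill_insns(void *area, unsigned int size)
{
	memset(area, 0, size);	/* real archs poison holes with trap instructions */
}

void emit_program(struct bpf_prog *fp, u8 *image, u8 *fimage);	/* hypothetical helper */

static struct bpf_prog *jit_with_pack_alloc(struct bpf_prog *fp, unsigned int alloclen)
{
	struct bpf_binary_header *fhdr;	/* header of the final read-only region */
	struct bpf_binary_header *hdr;	/* header of the writable scratch buffer */
	u8 *fimage, *image;

	/* One call hands back both the final ro region and an rw scratch buffer. */
	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
					 fill_ill_insns);
	if (!fhdr)
		return fp;

	/*
	 * Generate code into the writable buffer (image). Relative offsets
	 * must be computed against the final location (fimage), since that
	 * is where the program will execute from.
	 */
	emit_program(fp, image, fimage);

	/* Copy the rw buffer into the ro region and release the rw buffer. */
	if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr))
		return fp;

	fp->bpf_func = (void *)fimage;
	fp->jited = 1;
	return fp;
}

When code generation fails after allocation, the patch instead copies the rw header's size field across with bpf_arch_text_copy() and frees both regions with bpf_jit_binary_pack_free(fhdr, hdr), as the bpf_jit_comp.c hunk below shows.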
arch/powerpc/net/bpf_jit.h:

@@ -36,9 +36,6 @@
 		EMIT(PPC_RAW_BRANCH(offset));				      \
 	} while (0)
 
-/* bl (unconditional 'branch' with link) */
-#define PPC_BL(dest)	 EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
-
 /* "cond" here covers BO:BI fields. */
 #define PPC_BCC_SHORT(cond, dest)					      \
 	do {								      \
@@ -147,12 +144,6 @@ struct codegen_context {
 #define BPF_FIXUP_LEN	2 /* Two instructions => 8 bytes */
 #endif
 
-static inline void bpf_flush_icache(void *start, void *end)
-{
-	smp_wmb();	/* smp write barrier */
-	flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
 {
 	return ctx->seen & (1 << (31 - i));
@@ -169,16 +160,17 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
 }
 
 void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func);
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass);
 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
 void bpf_jit_realloc_regs(struct codegen_context *ctx);
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
 
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
-			  int insn_idx, int jmp_off, int dst_reg);
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+			  struct codegen_context *ctx, int insn_idx,
+			  int jmp_off, int dst_reg);
 
 #endif
arch/powerpc/net/bpf_jit_comp.c:

@@ -40,9 +40,12 @@ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg,
 }
 
 struct powerpc_jit_data {
-	struct bpf_binary_header *header;
+	/* address of rw header */
+	struct bpf_binary_header *hdr;
+	/* address of ro final header */
+	struct bpf_binary_header *fhdr;
 	u32 *addrs;
-	u8 *image;
+	u8 *fimage;
 	u32 proglen;
 	struct codegen_context ctx;
 };
@@ -63,11 +66,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct codegen_context cgctx;
 	int pass;
 	int flen;
-	struct bpf_binary_header *bpf_hdr;
+	struct bpf_binary_header *fhdr = NULL;
+	struct bpf_binary_header *hdr = NULL;
 	struct bpf_prog *org_fp = fp;
 	struct bpf_prog *tmp_fp;
 	bool bpf_blinded = false;
 	bool extra_pass = false;
+	u8 *fimage = NULL;
+	u32 *fcode_base;
 	u32 extable_len;
 	u32 fixup_len;
@@ -97,9 +103,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	addrs = jit_data->addrs;
 	if (addrs) {
 		cgctx = jit_data->ctx;
-		image = jit_data->image;
-		bpf_hdr = jit_data->header;
+		/*
+		 * JIT compiled to a writable location (image/code_base) first.
+		 * It is then moved to the readonly final location (fimage/fcode_base)
+		 * using instruction patching.
+		 */
+		fimage = jit_data->fimage;
+		fhdr = jit_data->fhdr;
 		proglen = jit_data->proglen;
+		hdr = jit_data->hdr;
+		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
 		extra_pass = true;
 		/* During extra pass, ensure index is reset before repopulating extable entries */
 		cgctx.exentry_idx = 0;
@@ -119,7 +132,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
 
 	/* Scouting faux-generate pass 0 */
-	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
 		/* We hit something illegal or unsupported. */
 		fp = org_fp;
 		goto out_addrs;
@@ -134,7 +147,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	 */
 	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
 		cgctx.idx = 0;
-		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
 			fp = org_fp;
 			goto out_addrs;
 		}
@@ -156,17 +169,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	proglen = cgctx.idx * 4;
 	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
 
-	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
-	if (!bpf_hdr) {
+	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
+					 bpf_jit_fill_ill_insns);
+	if (!fhdr) {
 		fp = org_fp;
 		goto out_addrs;
 	}
 
 	if (extable_len)
-		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
+		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
 
 skip_init_ctx:
 	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
 
 	/* Code generation passes 1-2 */
 	for (pass = 1; pass < 3; pass++) {
@@ -174,8 +189,10 @@ skip_init_ctx:
 		cgctx.idx = 0;
 		cgctx.alt_exit_addr = 0;
 		bpf_jit_build_prologue(code_base, &cgctx);
-		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
-			bpf_jit_binary_free(bpf_hdr);
+		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
+				       extra_pass)) {
+			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
+			bpf_jit_binary_pack_free(fhdr, hdr);
 			fp = org_fp;
 			goto out_addrs;
 		}
@@ -195,17 +212,19 @@ skip_init_ctx:
 
 #ifdef CONFIG_PPC64_ELF_ABI_V1
 	/* Function descriptor nastiness: Address + TOC */
-	((u64 *)image)[0] = (u64)code_base;
+	((u64 *)image)[0] = (u64)fcode_base;
 	((u64 *)image)[1] = local_paca->kernel_toc;
 #endif
 
-	fp->bpf_func = (void *)image;
+	fp->bpf_func = (void *)fimage;
 	fp->jited = 1;
 	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
 
-	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
 	if (!fp->is_func || extra_pass) {
-		bpf_jit_binary_lock_ro(bpf_hdr);
+		if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
+			fp = org_fp;
+			goto out_addrs;
+		}
 		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
 		kfree(addrs);
@@ -215,8 +234,9 @@ out_addrs:
 		jit_data->addrs = addrs;
 		jit_data->ctx = cgctx;
 		jit_data->proglen = proglen;
-		jit_data->image = image;
-		jit_data->header = bpf_hdr;
+		jit_data->fimage = fimage;
+		jit_data->fhdr = fhdr;
+		jit_data->hdr = hdr;
 	}
 
out:
@@ -230,12 +250,13 @@ out:
  * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
  * this function, as this only applies to BPF_PROBE_MEM, for now.
  */
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
-			  int insn_idx, int jmp_off, int dst_reg)
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+			  struct codegen_context *ctx, int insn_idx, int jmp_off,
+			  int dst_reg)
 {
 	off_t offset;
 	unsigned long pc;
-	struct exception_table_entry *ex;
+	struct exception_table_entry *ex, *ex_entry;
 	u32 *fixup;
 
 	/* Populate extable entries only in the last pass */
@@ -246,9 +267,16 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
 	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
 		return -EINVAL;
 
+	/*
+	 * Program is first written to image before copying to the
+	 * final location (fimage). Accordingly, update in the image first.
+	 * As all offsets used are relative, copying as is to the
+	 * final location should be alright.
+	 */
 	pc = (unsigned long)&image[insn_idx];
+	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
 
-	fixup = (void *)fp->aux->extable -
+	fixup = (void *)ex -
 		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
 		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
@@ -259,18 +287,42 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
 	fixup[BPF_FIXUP_LEN - 1] =
 		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
 
-	ex = &fp->aux->extable[ctx->exentry_idx];
+	ex_entry = &ex[ctx->exentry_idx];
 
-	offset = pc - (long)&ex->insn;
+	offset = pc - (long)&ex_entry->insn;
 	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
 		return -ERANGE;
-	ex->insn = offset;
+	ex_entry->insn = offset;
 
-	offset = (long)fixup - (long)&ex->fixup;
+	offset = (long)fixup - (long)&ex_entry->fixup;
 	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
 		return -ERANGE;
-	ex->fixup = offset;
+	ex_entry->fixup = offset;
 
 	ctx->exentry_idx++;
 	return 0;
 }
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+	if (fp->jited) {
+		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
+		struct bpf_binary_header *hdr;
+
+		/*
+		 * If we fail the final pass of JIT (from jit_subprogs),
+		 * the program may not be finalized yet. Call finalize here
+		 * before freeing it.
+		 */
+		if (jit_data) {
+			bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
+			kvfree(jit_data->addrs);
+			kfree(jit_data);
+		}
+		hdr = bpf_jit_binary_pack_hdr(fp);
+		bpf_jit_binary_pack_free(hdr, NULL);
+		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+	}
+
+	bpf_prog_unlock_free(fp);
+}
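A note on the extable handling above: entries are written into the writable image first, and the in-diff comment points out that this is safe because every stored value is a relative offset. The userspace toy below is an illustration only, not kernel code; struct rel_entry and recover() merely mimic exception_table_entry::insn and the kernel's lookup rule. It shows why a byte-for-byte copy keeps such entries valid.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rel_entry {
	int32_t insn;	/* stores "target - address of this field", like extable entries */
};

static uintptr_t recover(const struct rel_entry *e)
{
	return (uintptr_t)&e->insn + e->insn;	/* lookup rule: field address + stored offset */
}

int main(void)
{
	/* "JIT image": 16 fake instructions followed by one extable entry */
	struct { uint32_t insns[16]; struct rel_entry ex; } rw, ro;

	/* record the faulting instruction (insns[5]) as a relative offset */
	rw.ex.insn = (int32_t)((uintptr_t)&rw.insns[5] - (uintptr_t)&rw.ex.insn);

	/* "finalize": copy the writable buffer to its final location as-is */
	memcpy(&ro, &rw, sizeof(rw));

	/* both lookups land on the right instruction within their own buffer */
	printf("%d %d\n",
	       recover(&rw.ex) == (uintptr_t)&rw.insns[5],
	       recover(&ro.ex) == (uintptr_t)&ro.insns[5]);
	return 0;
}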
arch/powerpc/net/bpf_jit_comp32.c:

@@ -200,12 +200,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	EMIT(PPC_RAW_BLR());
 }
 
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+/* Relative offset needs to be calculated based on final image location */
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 {
-	s32 rel = (s32)func - (s32)(image + ctx->idx);
+	s32 rel = (s32)func - (s32)(fimage + ctx->idx);
 
 	if (image && rel < 0x2000000 && rel >= -0x2000000) {
-		PPC_BL(func);
+		EMIT(PPC_RAW_BL(rel));
 	} else {
 		/* Load function address into r0 */
 		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@@ -278,7 +279,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 }
 
 /* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 		       u32 *addrs, int pass, bool extra_pass)
 {
 	const struct bpf_insn *insn = fp->insnsi;
@@ -1009,7 +1010,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 					jmp_off += 4;
 				}
 
-				ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
+				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
 							    jmp_off, dst_reg);
 				if (ret)
 					return ret;
@@ -1065,7 +1066,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
 		}
 
-		ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+		ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 		if (ret)
 			return ret;
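The rel < 0x2000000 && rel >= -0x2000000 test in the hunk above corresponds to the reach of the powerpc branch-and-link instruction, whose I-form encodes a 24-bit signed word displacement (about +/- 32 MB). The standalone helper below restates that check for illustration only; it is not part of the patch, and the word-alignment test is an added assumption about valid targets.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative reachability check for a powerpc "bl": the LI field is a
 * 24-bit signed value shifted left by 2, giving a +/- 32 MB range with
 * 4-byte-aligned displacements. Not kernel code.
 */
static bool bl_reachable(uintptr_t from, uintptr_t to)
{
	int64_t rel = (int64_t)to - (int64_t)from;

	return rel >= -0x2000000 && rel < 0x2000000 && !(rel & 0x3);
}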
arch/powerpc/net/bpf_jit_comp64.c:

@@ -240,7 +240,7 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
 	return 0;
 }
 
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 {
 	unsigned int i, ctx_idx = ctx->idx;
@@ -361,7 +361,7 @@ asm (
 );
 
 /* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 		       u32 *addrs, int pass, bool extra_pass)
 {
 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
@@ -952,8 +952,8 @@ emit_clear:
 			addrs[++i] = ctx->idx * 4;
 
 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
-				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
-							    4, dst_reg);
+				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+							    ctx->idx - 1, 4, dst_reg);
 				if (ret)
 					return ret;
 			}
@@ -1007,7 +1007,7 @@ emit_clear:
 			if (func_addr_fixed)
 				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
 			else
-				ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 
 			if (ret)
 				return ret;