riscv, bpf: Use bpf_prog_pack for RV64 bpf trampoline
We used bpf_prog_pack to aggregate bpf programs into huge pages to relieve the iTLB pressure on the system. We can apply it to the bpf trampoline as well, as Song has already implemented it in the core and on x86 [0]. This patch applies bpf_prog_pack to the RV64 bpf trampoline. Since Song and Puranjay have done a lot of work for bpf_prog_pack on RV64, implementing this function is easy. Signed-off-by: Pu Lehui <pulehui@huawei.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Tested-by: Björn Töpel <bjorn@rivosinc.com> #riscv Link: https://lore.kernel.org/all/20231206224054.492250-1-song@kernel.org [0] Link: https://lore.kernel.org/bpf/20240622030437.3973492-4-pulehui@huaweicloud.com
This commit is contained in:
committed by
Daniel Borkmann
parent
9f1e16fb1f
commit
2382a405c5
@@ -957,7 +957,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
|
||||
goto out;
|
||||
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
|
||||
emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
|
||||
im->ip_after_call = ctx->insns + ctx->ninsns;
|
||||
im->ip_after_call = ctx->ro_insns + ctx->ninsns;
|
||||
/* 2 nops reserved for auipc+jalr pair */
|
||||
emit(rv_nop(), ctx);
|
||||
emit(rv_nop(), ctx);
|
||||
@@ -978,7 +978,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
im->ip_epilogue = ctx->insns + ctx->ninsns;
|
||||
im->ip_epilogue = ctx->ro_insns + ctx->ninsns;
|
||||
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
|
||||
ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
|
||||
if (ret)
|
||||
@@ -1041,25 +1041,33 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
|
||||
return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
|
||||
}
|
||||
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
|
||||
void *image_end, const struct btf_func_model *m,
|
||||
void *arch_alloc_bpf_trampoline(unsigned int size)
|
||||
{
|
||||
return bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
|
||||
}
|
||||
|
||||
void arch_free_bpf_trampoline(void *image, unsigned int size)
|
||||
{
|
||||
bpf_prog_pack_free(image, size);
|
||||
}
|
||||
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
|
||||
void *ro_image_end, const struct btf_func_model *m,
|
||||
u32 flags, struct bpf_tramp_links *tlinks,
|
||||
void *func_addr)
|
||||
{
|
||||
int ret;
|
||||
void *image, *res;
|
||||
struct rv_jit_context ctx;
|
||||
u32 size = image_end - image;
|
||||
u32 size = ro_image_end - ro_image;
|
||||
|
||||
image = kvmalloc(size, GFP_KERNEL);
|
||||
if (!image)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx.ninsns = 0;
|
||||
/*
|
||||
* The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
|
||||
* JITed instructions and later copies it to a RX region (ctx.ro_insns).
|
||||
* It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
|
||||
* trampoline image uses the same memory area for writing and execution,
|
||||
* both ctx.insns and ctx.ro_insns can be set to image.
|
||||
*/
|
||||
ctx.insns = image;
|
||||
ctx.ro_insns = image;
|
||||
ctx.ro_insns = ro_image;
|
||||
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@@ -1069,8 +1077,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
|
||||
goto out;
|
||||
}
|
||||
|
||||
bpf_flush_icache(image, image_end);
|
||||
res = bpf_arch_text_copy(ro_image, image, size);
|
||||
if (IS_ERR(res)) {
|
||||
ret = PTR_ERR(res);
|
||||
goto out;
|
||||
}
|
||||
|
||||
bpf_flush_icache(ro_image, ro_image_end);
|
||||
out:
|
||||
kvfree(image);
|
||||
return ret < 0 ? ret : size;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user