		*ptr++ = 0x91d02005; /* ta 5 */
}
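+/* JIT state preserved between the first compile pass and the extra
+ * pass that is run once subprog call addresses are known.
+ */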
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
+	bool extra_pass = false;
	struct jit_ctx ctx;
	u32 image_size;
	u8 *image_ptr;
		prog = tmp;
	}
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
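+	/* A non-NULL ctx.offset means an earlier pass already built the
+	 * image, so reuse the saved state and run only the extra pass.
+	 */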
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;
	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
-		goto out;
+		goto out_off;
	}
	/* Fake pass to detect features used, and get an accurate assessment
	}
	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
	for (pass = 1; pass < 3; pass++) {
		ctx.idx = 0;
	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
-	bpf_jit_binary_lock_ro(header);
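+	/* The image is only final for single-function programs or on the
+	 * extra pass; otherwise keep it writable and save the JIT state.
+	 */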
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = image_size;
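+	/* Release per-program JIT state only once the image is final. */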
+	if (!prog->is_func || extra_pass) {
out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?