@@ -738,3 +738,202 @@ cmp_x:
 		case BPF_S_JMP_JSET_K:
 			/* pc += (A & K) ? pc->jt : pc->jf */
 			condt = ARM_COND_NE;
+			/* not set iff all zeroes iff Z==1 iff EQ */
+
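+			/* imm8m() yields the 12-bit "modified immediate"
+			 * encoding of K (an 8-bit value rotated right by an
+			 * even amount), or a negative value when K cannot
+			 * be encoded that way and needs a scratch register
+			 */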
+			imm12 = imm8m(k);
+			if (imm12 < 0) {
+				emit_mov_i_no8m(r_scratch, k, ctx);
+				emit(ARM_TST_R(r_A, r_scratch), ctx);
+			} else {
+				emit(ARM_TST_I(r_A, imm12), ctx);
+			}
+			goto cond_jump;
+		case BPF_S_JMP_JSET_X:
+			/* pc += (A & X) ? pc->jt : pc->jf */
+			update_on_xread(ctx);
+			condt = ARM_COND_NE;
+			emit(ARM_TST_R(r_A, r_X), ctx);
+			goto cond_jump;
+		case BPF_S_RET_A:
+			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
+			goto b_epilogue;
+		case BPF_S_RET_K:
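+			/* remember the first 'ret 0' so that emit_err_ret()
+			 * can branch to it instead of emitting a second
+			 * zero-return sequence
+			 */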
+			if ((k == 0) && (ctx->ret0_fp_idx < 0))
+				ctx->ret0_fp_idx = i;
+			emit_mov_i(ARM_R0, k, ctx);
+b_epilogue:
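+			/* the very last instruction falls through to the
+			 * epilogue instead of branching to it
+			 */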
+			if (i != ctx->skf->len - 1)
+				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
+			break;
+		case BPF_S_MISC_TAX:
+			/* X = A */
+			ctx->seen |= SEEN_X;
+			emit(ARM_MOV_R(r_X, r_A), ctx);
+			break;
+		case BPF_S_MISC_TXA:
+			/* A = X */
+			update_on_xread(ctx);
+			emit(ARM_MOV_R(r_A, r_X), ctx);
+			break;
+		case BPF_S_ANC_PROTOCOL:
+			/* A = ntohs(skb->protocol) */
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+						  protocol) != 2);
+			off = offsetof(struct sk_buff, protocol);
+			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
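+			/* skb->protocol is stored in network byte order;
+			 * the swap brings it to host order
+			 */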
+			emit_swap16(r_A, r_scratch, ctx);
+			break;
+		case BPF_S_ANC_CPU:
+			/* r_scratch = current_thread_info() */
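+			/* thread_info sits at the bottom of the THREAD_SIZE-
+			 * aligned kernel stack, so clearing the low bits of
+			 * SP yields its address
+			 */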
+			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
+			/* A = current_thread_info()->cpu */
+			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
+			off = offsetof(struct thread_info, cpu);
+			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			break;
+		case BPF_S_ANC_IFINDEX:
+			/* A = skb->dev->ifindex */
+			ctx->seen |= SEEN_SKB;
+			off = offsetof(struct sk_buff, dev);
+			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
+
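+			/* take the error return if skb->dev is NULL */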
+			emit(ARM_CMP_I(r_scratch, 0), ctx);
+			emit_err_ret(ARM_COND_EQ, ctx);
+
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						  ifindex) != 4);
+			off = offsetof(struct net_device, ifindex);
+			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			break;
+		case BPF_S_ANC_MARK:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+			off = offsetof(struct sk_buff, mark);
+			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+			break;
+		case BPF_S_ANC_RXHASH:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+			off = offsetof(struct sk_buff, rxhash);
+			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+			break;
+		case BPF_S_ANC_VLAN_TAG:
+		case BPF_S_ANC_VLAN_TAG_PRESENT:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+			off = offsetof(struct sk_buff, vlan_tci);
+			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
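+			/* vlan_tci carries both the VID and the "tag present"
+			 * flag; keep only the part this opcode asks for
+			 */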
+			if (inst->code == BPF_S_ANC_VLAN_TAG)
+				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
+			else
+				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+			break;
+		case BPF_S_ANC_QUEUE:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+						  queue_mapping) != 2);
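+			/* the ldrh used below can only encode an 8-bit
+			 * immediate offset, hence the check against 0xff
+			 */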
+			BUILD_BUG_ON(offsetof(struct sk_buff,
+					      queue_mapping) > 0xff);
+			off = offsetof(struct sk_buff, queue_mapping);
+			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
+			break;
+		default:
+			return -1;
+		}
+	}
+
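+	/* here i == ctx->skf->len, so this records the offset of the
+	 * epilogue, the branch target of the 'ret' instructions above
+	 */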
+	/* compute offsets only during the first pass */
+	if (ctx->target == NULL)
+		ctx->offsets[i] = ctx->idx * 4;
+
+	return 0;
+}
+
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+	struct jit_ctx ctx;
+	unsigned tmp_idx;
+	unsigned alloc_size;
+
+	if (!bpf_jit_enable)
+		return;
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.skf = fp;
+	ctx.ret0_fp_idx = -1;
+
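+	/* one offset per BPF instruction, plus an extra slot for the
+	 * offset of the epilogue
+	 */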
+	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+	if (ctx.offsets == NULL)
+		return;
+
+	/* fake pass to fill in the ctx->seen */
+	if (unlikely(build_body(&ctx)))
+		goto out;
+
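+	/* measure the prologue: b_imm() needs its size to translate
+	 * body offsets into image offsets
+	 */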
+	tmp_idx = ctx.idx;
+	build_prologue(&ctx);
+	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+
+#if __LINUX_ARM_ARCH__ < 7
+	tmp_idx = ctx.idx;
+	build_epilogue(&ctx);
+	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
+
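+	/* pre-v7 cores lack movw/movt, so large immediates are loaded
+	 * from a literal pool appended after the epilogue; reserve one
+	 * word per collected immediate
+	 */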
+	ctx.idx += ctx.imm_count;
+	if (ctx.imm_count) {
+		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
+		if (ctx.imms == NULL)
+			goto out;
+	}
+#else
+	/* there's nothing after the epilogue on ARMv7 */
+	build_epilogue(&ctx);
+#endif
+
+	alloc_size = 4 * ctx.idx;
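+	/* the allocation is reused as a work_struct by bpf_jit_free(),
+	 * so it must never be smaller than one
+	 */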
+	ctx.target = module_alloc(max(sizeof(struct work_struct),
+				      alloc_size));
+	if (unlikely(ctx.target == NULL))
+		goto out;
+
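+	/* second (real) pass: ctx.seen and all sizes are now stable,
+	 * so the same builders emit into ctx.target
+	 */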
+	ctx.idx = 0;
+	build_prologue(&ctx);
+	build_body(&ctx);
+	build_epilogue(&ctx);
+
+	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
+
+#if __LINUX_ARM_ARCH__ < 7
+	if (ctx.imm_count)
+		kfree(ctx.imms);
+#endif
+
+	if (bpf_jit_enable > 1)
+		print_hex_dump(KERN_INFO, "BPF JIT code: ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
+			       alloc_size, false);
+
+	fp->bpf_func = (void *)ctx.target;
+out:
+	kfree(ctx.offsets);
+	return;
+}
+
+static void bpf_jit_free_worker(struct work_struct *work)
+{
+	module_free(NULL, work);
+}
+
+void bpf_jit_free(struct sk_filter *fp)
+{
+	struct work_struct *work;
+
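+	/* fp->bpf_func still pointing at sk_run_filter means the filter
+	 * was never JITed and there is no image to free
+	 */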
+	if (fp->bpf_func != sk_run_filter) {
+		work = (struct work_struct *)fp->bpf_func;
+
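+		/* module_free() may sleep, so defer it to process context;
+		 * the JIT image itself doubles as the work item
+		 */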
+		INIT_WORK(work, bpf_jit_free_worker);
+		schedule_work(work);
+	}
+}