|
@@ -55,3 +55,60 @@
|
|
|
/* ctx->flags bit: presumably marks that the JIT prologue must zero the
 * BPF X scratch register before the program body runs — confirm against
 * the prologue emitter (not visible in this chunk).
 */
#define FLAG_NEED_X_RESET (1 << 0)
|
|
|
|
|
|
struct jit_ctx {
|
|
|
+ const struct sk_filter *skf;
|
|
|
+ unsigned idx;
|
|
|
+ unsigned prologue_bytes;
|
|
|
+ int ret0_fp_idx;
|
|
|
+ u32 seen;
|
|
|
+ u32 flags;
|
|
|
+ u32 *offsets;
|
|
|
+ u32 *target;
|
|
|
+#if __LINUX_ARM_ARCH__ < 7
|
|
|
+ u16 epilogue_bytes;
|
|
|
+ u16 imm_count;
|
|
|
+ u32 *imms;
|
|
|
+#endif
|
|
|
+};
|
|
|
+
|
|
|
+int bpf_jit_enable __read_mostly;
|
|
|
+
|
|
|
+static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
|
|
|
+{
|
|
|
+ u8 ret;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 1);
|
|
|
+
|
|
|
+ return (u64)err << 32 | ret;
|
|
|
+}
|
|
|
+
|
|
|
+static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
|
|
|
+{
|
|
|
+ u16 ret;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 2);
|
|
|
+
|
|
|
+ return (u64)err << 32 | ntohs(ret);
|
|
|
+}
|
|
|
+
|
|
|
+static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
|
|
|
+{
|
|
|
+ u32 ret;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 4);
|
|
|
+
|
|
|
+ return (u64)err << 32 | ntohl(ret);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
|
|
|
+ * (where the assembly routines like __aeabi_uidiv could cause problems).
|
|
|
+ */
|
|
|
+static u32 jit_udiv(u32 dividend, u32 divisor)
|
|
|
+{
|
|
|
+ return dividend / divisor;
|
|
|
+}
|
|
|
+
|
|
|
+static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
|