/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

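/*
 * ctx->seen is a bitmap of the resources the filter actually uses: one bit
 * per BPF scratch memory word (M[0..15]), then X, a helper call, the skb
 * pointer and packet data.  The prologue and epilogue below only save
 * registers and set up state for resources whose bit is set.
 */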
#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)

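/*
 * Per-compilation state.  The JIT walks the filter twice: a first "fake"
 * pass with target == NULL that only advances idx to size the image (and,
 * pre-ARMv7, counts literal-pool constants in imm_count), and a second
 * pass that actually writes instructions into target.  offsets[] records
 * where each BPF instruction starts in the generated code so branches can
 * be resolved.
 */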
struct jit_ctx {
	const struct sk_filter *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

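/* toggled via the net.core.bpf_jit_enable sysctl */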
int bpf_jit_enable __read_mostly;

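/*
 * Load helpers for the out-of-line slow path (data not in the linear skb
 * area).  Each packs the skb_copy_bits() error code into the high 32 bits
 * and the (byte-swapped) value into the low 32 bits, so on the usual
 * little-endian EABI the generated code gets the value back in r0 and the
 * error word in r1, where both are cheap to test.
 */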
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}

/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

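/*
 * The condition code occupies bits 31:28 of every ARM instruction, hence
 * inst | (cond << 28).  On the sizing pass (target == NULL) nothing is
 * written, but idx still advances so the image size comes out right.
 */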
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst | (cond << 28);

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

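/*
 * Build the mask of registers the prologue must push (and the epilogue
 * pop), based on what the filter was seen to use.  A lives in r4, which is
 * callee-saved, so it only needs saving when the filter can actually
 * produce a value in A.
 */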
static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == BPF_S_RET_A))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;

	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);

	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

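/*
 * True when the instruction unconditionally overwrites A.  Used by the
 * prologue below to skip the "zero A" instruction when the filter's first
 * instruction would clobber A anyway.
 */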
static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_QUEUE:
		return true;
	default:
		return false;
	}
}

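/*
 * On entry the skb pointer is in r0 (first argument per the AAPCS).  For a
 * filter that reads packet data, the emitted prologue looks roughly like:
 *
 *	push	{r4, r6, r7, r8, lr}
 *	mov	r6, r0			@ r_skb
 *	ldr	r7, [r6, #data]		@ r_skb_data
 *	ldr	r8, [r6, #len]
 *	ldr	r0, [r6, #data_len]
 *	sub	r8, r8, r0		@ r_skb_hl = skb_headlen(skb)
 *
 * (the exact register list and offsets depend on the filter and kernel)
 */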
static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);

		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

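/*
 * With a frame pointer, a single "ldm sp, {..., sp, pc}" both restores the
 * saved registers and returns: sp is reloaded from the pushed ip (the old
 * sp) and pc from the pushed lr.  Without one, popping into pc doubles as
 * the return when a helper call forced lr onto the stack; otherwise a
 * plain "bx lr" is used so Thumb interworking keeps working.
 */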
static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

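/*
 * An ARM data-processing immediate is an 8-bit value rotated right by an
 * even amount (0..30).  imm8m() returns the 12-bit operand-2 encoding for
 * a representable constant, or -1.  For example, 0xff000000 == 0xff ror 8,
 * so it encodes as (4 << 8) | 0xff = 0x4ff (the rotate field holds 8 / 2).
 */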
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}

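/*
 * Pre-ARMv7 there is no movw/movt, so arbitrary 32-bit constants are kept
 * in a literal pool placed right after the epilogue and loaded with a
 * PC-relative ldr.  Note the "+ 8" below: in ARM mode, reading PC yields
 * the address of the current instruction plus 8.
 */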
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

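/*
 * Pre-ARMv6 cores cannot be relied on for unaligned word/halfword loads,
 * and REV/REV16 do not exist, so big-endian loads from packet data are
 * assembled byte by byte with shifts and ORRs.
 */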
#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8), for a 16-bit value in r_src */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
	/* the first shift leaves stray bits in r_dst[23:16]; clear them */
	emit(ARM_LSL_I(r_dst, r_dst, 16), ctx);
	emit(ARM_LSR_I(r_dst, r_dst, 16), ctx);
}

#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
	/*
	 * Assumed completion of the truncated body: as with the BE loads
	 * above, the swap is only needed on little-endian, where REV16
	 * byte-swaps each halfword (hence the __maybe_unused parameters).
	 */
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}