/*
 * linux/arch/arm/mm/alignment.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2001 Russell King
 * Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 * - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 * Copyright (C) 1996, Cygnus Software Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>

#include "fault.h"
/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
/*
 * ARM load/store instruction field extraction helpers.  "i" is the
 * 32-bit instruction word; each macro isolates one encoding field.
 */
#define CODING_BITS(i)	(i & 0x0e000000)
#define LDST_I_BIT(i)	(i & (1 << 26))	/* Immediate constant */
#define LDST_P_BIT(i)	(i & (1 << 24))	/* Preindex */
#define LDST_U_BIT(i)	(i & (1 << 23))	/* Add offset */
#define LDST_W_BIT(i)	(i & (1 << 21))	/* Writeback */
#define LDST_L_BIT(i)	(i & (1 << 20))	/* Load */

/* True when the P bit (24) and U bit (23) hold the same value */
#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i)	(i & (1 << 22))	/* double/half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 22))	/* write CPSR from SPSR */

#define RN_BITS(i)	((i >> 16) & 15)	/* Rn */
#define RD_BITS(i)	((i >> 12) & 15)	/* Rd */
#define RM_BITS(i)	(i & 15)		/* Rm */

#define REGMASK_BITS(i)	(i & 0xffff)	/* low 16 bits: register list */
#define OFFSET_BITS(i)	(i & 0x0fff)	/* 12-bit immediate offset */

/* Register-offset shift field decoding */
#define IS_SHIFT(i)	(i & 0x0ff0)
#define SHIFT_BITS(i)	((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0x60)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x20
#define SHIFT_ASR	0x40
#define SHIFT_RORRRX	0x60

/* Recognizable poison value; consumers are outside this chunk */
#define BAD_INSTR	0xdeadc0de

/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
#define IS_T32(hi16) \
	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
/*
 * Alignment-fault statistics, reported via alignment_proc_show().
 * Incremented elsewhere in this file (handlers not in this chunk).
 */
static unsigned long ai_user;		/* faults taken in user mode */
static unsigned long ai_sys;		/* faults taken in kernel mode */
static unsigned long ai_skipped;	/* faults not fixed up */
static unsigned long ai_half;		/* half-word accesses fixed up */
static unsigned long ai_word;		/* word accesses fixed up */
static unsigned long ai_dword;		/* double-word accesses fixed up */
static unsigned long ai_multi;		/* multiple-register accesses fixed up */

/* User-mode fault policy: a combination of the UM_* bits below */
static int ai_usermode;

/* Settable at boot via "alignment=" and at runtime via sysfs (mode 0600) */
core_param(alignment, ai_usermode, int, 0600);

#define UM_WARN		(1 << 0)	/* warn about user faults */
#define UM_FIXUP	(1 << 1)	/* fix up user faults */
#define UM_SIGNAL	(1 << 2)	/* signal the task instead */
  75. /* Return true if and only if the ARMv6 unaligned access model is in use. */
  76. static bool cpu_is_v6_unaligned(void)
  77. {
  78. return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
  79. }
  80. static int safe_usermode(int new_usermode, bool warn)
  81. {
  82. /*
  83. * ARMv6 and later CPUs can perform unaligned accesses for
  84. * most single load and store instructions up to word size.
  85. * LDM, STM, LDRD and STRD still need to be handled.
  86. *
  87. * Ignoring the alignment fault is not an option on these
  88. * CPUs since we spin re-faulting the instruction without
  89. * making any progress.
  90. */
  91. if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
  92. new_usermode |= UM_FIXUP;
  93. if (warn)
  94. printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
  95. }
  96. return new_usermode;
  97. }
#ifdef CONFIG_PROC_FS
/*
 * Human-readable names for the user-mode policies, indexed by the
 * UM_WARN/UM_FIXUP/UM_SIGNAL bit combination (values 0-5).
 */
static const char *usermode_action[] = {
	"ignored",
	"warn",
	"fixup",
	"fixup+warn",
	"signal",
	"signal+warn"
};
/*
 * seq_file show handler: dump the alignment fault statistics and the
 * current user-mode fault policy.
 */
static int alignment_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "User:\t\t%lu\n", ai_user);
	seq_printf(m, "System:\t\t%lu\n", ai_sys);
	seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
	seq_printf(m, "Half:\t\t%lu\n", ai_half);
	seq_printf(m, "Word:\t\t%lu\n", ai_word);
	/* Double-word counter only shown on ARMv5TE and later */
	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
		seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
	seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
	/* ai_usermode (0-5) doubles as the index into usermode_action[] */
	seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
		   usermode_action[ai_usermode]);
	return 0;
}
/* Open hook: bind the /proc file to alignment_proc_show() via seq_file. */
static int alignment_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, alignment_proc_show, NULL);
}
  125. static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
  126. size_t count, loff_t *pos)
  127. {
  128. char mode;
  129. if (count > 0) {
  130. if (get_user(mode, buffer))
  131. return -EFAULT;
  132. if (mode >= '0' && mode <= '5')
  133. ai_usermode = safe_usermode(mode - '0', true);
  134. }
  135. return count;
  136. }
/* File operations for the /proc alignment control file. */
static const struct file_operations alignment_proc_fops = {
	.open		= alignment_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */
/* A load/store offset, viewable as unsigned or sign-extended. */
union offset_union {
	unsigned long un;
	signed long sn;
};

/* TYPE_* result codes; the functions returning them are outside this chunk */
#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3
/*
 * Byte-order glue for the byte-at-a-time access macros below.  On
 * big-endian (__ARMEB__) the first byte transferred is the most
 * significant, so the value must be rotated into position; on
 * little-endian a simple right shift walks through the bytes.
 */
#ifdef __ARMEB__
#define BE		1
#define FIRST_BYTE_16	"mov %1, %1, ror #8\n"
#define FIRST_BYTE_32	"mov %1, %1, ror #24\n"
#define NEXT_BYTE	"ror #24"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE	"lsr #8"
#endif
/*
 * Load one byte from *addr with "ins" (ldrb or ldrbt) and post-
 * increment addr.  If the load faults, the __ex_table entry redirects
 * to the .fixup stub, which sets err to 1 and resumes after the load;
 * otherwise err is left unchanged.
 */
#define __get8_unaligned_check(ins,val,addr,err)	\
	__asm__(					\
	ARM(	"1:	"ins"	%1, [%2], #1\n"	)	\
	THUMB(	"1:	"ins"	%1, [%2]\n"	)	\
	THUMB(	"	add	%2, %2, #1\n"	)	\
	"2:\n"						\
	"	.pushsection .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, #1\n"			\
	"	b	2b\n"				\
	"	.popsection\n"				\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
/*
 * Read a 16-bit value one byte at a time so the access never needs to
 * be naturally aligned.  Bytes are assembled according to BE.  On any
 * faulting byte, jumps to a "fault" label that must exist at the
 * expansion site.
 */
#define __get16_unaligned_check(ins,val,addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(ins,v,a,err);	\
		val = v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)

/* Kernel-mode (ldrb) and user-mode (ldrbt) variants */
#define get16_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrb",val,addr)
#define get16t_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrbt",val,addr)
/*
 * Read a 32-bit value one byte at a time (see the 16-bit variant
 * above).  Bytes are assembled according to BE; jumps to a "fault"
 * label at the expansion site if any byte access faults.
 */
#define __get32_unaligned_check(ins,val,addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(ins,v,a,err);	\
		val = v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)

/* Kernel-mode (ldrb) and user-mode (ldrbt) variants */
#define get32_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrb",val,addr)
#define get32t_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrbt",val,addr)
  213. #define __put16_unaligned_check(ins,val,addr) \
  214. do { \
  215. unsigned int err = 0, v = val, a = addr; \
  216. __asm__( FIRST_BYTE_16 \
  217. ARM( "1: "ins" %1, [%2], #1\n" ) \
  218. THUMB( "1: "ins" %1, [%2]\n" ) \
  219. THUMB( " add %2, %2, #1\n" ) \
  220. " mov %1, %1, "NEXT_BYTE"\n" \
  221. "2: "ins" %1, [%2]\n" \
  222. "3:\n" \
  223. " .pushsection .fixup,\"ax\"\n" \