/*
 *  linux/arch/arm/mm/alignment.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2001 Russell King
 *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>

#include "fault.h"

/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) - July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
#define CODING_BITS(i)	(i & 0x0e000000)

#define LDST_I_BIT(i)	(i & (1 << 26))		/* Immediate constant	*/
#define LDST_P_BIT(i)	(i & (1 << 24))		/* Preindex		*/
#define LDST_U_BIT(i)	(i & (1 << 23))		/* Add offset		*/
#define LDST_W_BIT(i)	(i & (1 << 21))		/* Writeback		*/
#define LDST_L_BIT(i)	(i & (1 << 20))		/* Load			*/

#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i)	(i & (1 << 22))		/* double/half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 22))		/* write CPSR from SPSR	*/

#define RN_BITS(i)	((i >> 16) & 15)	/* Rn			*/
#define RD_BITS(i)	((i >> 12) & 15)	/* Rd			*/
#define RM_BITS(i)	(i & 15)		/* Rm			*/

#define REGMASK_BITS(i)	(i & 0xffff)
#define OFFSET_BITS(i)	(i & 0x0fff)

#define IS_SHIFT(i)	(i & 0x0ff0)
#define SHIFT_BITS(i)	((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0x60)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x20
#define SHIFT_ASR	0x40
#define SHIFT_RORRRX	0x60

#define BAD_INSTR	0xdeadc0de

/*
 * Thumb-2 32 bit format per ARMv7 DDI0406A A6.3: the leading halfword
 * lies in one of the e800h, f000h or f800h ranges.
 */
#define IS_T32(hi16) \
	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))

static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;

core_param(alignment, ai_usermode, int, 0600);

#define UM_WARN		(1 << 0)
#define UM_FIXUP	(1 << 1)
#define UM_SIGNAL	(1 << 2)

/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
}

static int safe_usermode(int new_usermode, bool warn)
{
	/*
	 * ARMv6 and later CPUs can perform unaligned accesses for
	 * most single load and store instructions up to word size.
	 * LDM, STM, LDRD and STRD still need to be handled.
	 *
	 * Ignoring the alignment fault is not an option on these
	 * CPUs since we spin re-faulting the instruction without
	 * making any progress.
	 */
	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
		new_usermode |= UM_FIXUP;

		if (warn)
			printk(KERN_WARNING "alignment: ignoring faults is "
			       "unsafe on this CPU. Defaulting to fixup mode.\n");
	}

	return new_usermode;
}
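/*
 * ai_usermode is a bitmask of the UM_* flags above, so the numeric
 * modes accepted by the /proc interface below decode as:
 *
 *	0 - ignore faults
 *	1 - warn only			(UM_WARN)
 *	2 - fix up silently		(UM_FIXUP)
 *	3 - fix up and warn		(UM_WARN | UM_FIXUP)
 *	4 - signal the task		(UM_SIGNAL)
 *	5 - signal and warn		(UM_WARN | UM_SIGNAL)
 *
 * These values index the usermode_action[] strings reported by
 * alignment_proc_show().
 */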
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
	"ignored",
	"warn",
	"fixup",
	"fixup+warn",
	"signal",
	"signal+warn"
};

static int alignment_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "User:\t\t%lu\n", ai_user);
	seq_printf(m, "System:\t\t%lu\n", ai_sys);
	seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
	seq_printf(m, "Half:\t\t%lu\n", ai_half);
	seq_printf(m, "Word:\t\t%lu\n", ai_word);
	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
		seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
	seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
	seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
			usermode_action[ai_usermode]);

	return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, alignment_proc_show, NULL);
}

static ssize_t alignment_proc_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *pos)
{
	char mode;

	if (count > 0) {
		if (get_user(mode, buffer))
			return -EFAULT;
		if (mode >= '0' && mode <= '5')
			ai_usermode = safe_usermode(mode - '0', true);
	}
	return count;
}

static const struct file_operations alignment_proc_fops = {
	.open		= alignment_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */

union offset_union {
	unsigned long un;
	  signed long sn;
};

#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3

#ifdef __ARMEB__
#define BE		1
#define FIRST_BYTE_16	"mov	%1, %1, ror #8\n"
#define FIRST_BYTE_32	"mov	%1, %1, ror #24\n"
#define NEXT_BYTE	"ror #24"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE	"lsr #8"
#endif

#define __get8_unaligned_check(ins,val,addr,err)	\
	__asm__(					\
 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
 THUMB(	"	add	%2, %2, #1\n"	)		\
	"2:\n"						\
	"	.pushsection .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, #1\n"			\
	"	b	2b\n"				\
	"	.popsection\n"				\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

#define __get16_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v, a = addr;		\
		__get8_unaligned_check(ins,v,a,err);		\
		val =  v << ((BE) ? 8 : 0);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ? 0 : 8);			\
		if (err)					\
			goto fault;				\
	} while (0)

#define get16_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrbt",val,addr)

#define __get32_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v, a = addr;		\
		__get8_unaligned_check(ins,v,a,err);		\
		val =  v << ((BE) ? 24 :  0);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ? 16 :  8);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ?  8 : 16);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ?  0 : 24);			\
		if (err)					\
			goto fault;				\
	} while (0)

#define get32_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrbt",val,addr)

#define __put16_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__( FIRST_BYTE_16				\
 ARM(	"1:	"ins"	%1, [%2], #1\n"	)			\
 THUMB(	"1:	"ins"	%1, [%2]\n"	)			\
 THUMB(	"	add	%2, %2, #1\n"	)			\
		"	mov	%1, %1, "NEXT_BYTE"\n"		\
		"2:	"ins"	%1, [%2]\n"			\
		"3:\n"						\
		"	.pushsection .fixup,\"ax\"\n"		\