|
@@ -113,3 +113,190 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
|
|
|
printk(", *pte=%08llx", (long long)pte_val(*pte));
|
|
|
#ifndef CONFIG_ARM_LPAE
|
|
|
printk(", *ppte=%08llx",
|
|
|
+ (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
|
|
|
+#endif
|
|
|
+ pte_unmap(pte);
|
|
|
+ } while(0);
|
|
|
+
|
|
|
+ printk("\n");
|
|
|
+}
|
|
|
#else /* CONFIG_MMU */
/*
 * No-MMU builds have no page tables to walk or dump; keep show_pte()
 * as an empty stub so callers need not be conditionally compiled.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
|
|
|
+
|
|
|
+/*
|
|
|
+ * Oops. The kernel tried to access some page that wasn't present.
|
|
|
+ */
|
|
|
+static void
|
|
|
+__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
|
|
|
+ struct pt_regs *regs)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * Are we prepared to handle this kernel fault?
|
|
|
+ */
|
|
|
+ if (fixup_exception(regs))
|
|
|
+ return;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * No handler, we'll have to terminate things with extreme prejudice.
|
|
|
+ */
|
|
|
+ bust_spinlocks(1);
|
|
|
+ printk(KERN_ALERT
|
|
|
+ "Unable to handle kernel %s at virtual address %08lx\n",
|
|
|
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
|
|
|
+ "paging request", addr);
|
|
|
+
|
|
|
+ show_pte(mm, addr);
|
|
|
+ die("Oops", regs, fsr);
|
|
|
+ bust_spinlocks(0);
|
|
|
+ do_exit(SIGKILL);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Something tried to access memory that isn't in our memory map..
|
|
|
+ * User mode accesses just cause a SIGSEGV
|
|
|
+ */
|
|
|
+static void
|
|
|
+__do_user_fault(struct task_struct *tsk, unsigned long addr,
|
|
|
+ unsigned int fsr, unsigned int sig, int code,
|
|
|
+ struct pt_regs *regs)
|
|
|
+{
|
|
|
+ struct siginfo si;
|
|
|
+
|
|
|
+#ifdef CONFIG_DEBUG_USER
|
|
|
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
|
|
|
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
|
|
|
+ printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
|
|
|
+ tsk->comm, sig, addr, fsr);
|
|
|
+ show_pte(tsk->mm, addr);
|
|
|
+ show_regs(regs);
|
|
|
+ }
|
|
|
+#endif
|
|
|
+
|
|
|
+ tsk->thread.address = addr;
|
|
|
+ tsk->thread.error_code = fsr;
|
|
|
+ tsk->thread.trap_no = 14;
|
|
|
+ si.si_signo = sig;
|
|
|
+ si.si_errno = 0;
|
|
|
+ si.si_code = code;
|
|
|
+ si.si_addr = (void __user *)addr;
|
|
|
+ force_sig_info(sig, &si, tsk);
|
|
|
+}
|
|
|
+
|
|
|
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|
|
+{
|
|
|
+ struct task_struct *tsk = current;
|
|
|
+ struct mm_struct *mm = tsk->active_mm;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If we are in kernel mode at this point, we
|
|
|
+ * have no context to handle this fault with.
|
|
|
+ */
|
|
|
+ if (user_mode(regs))
|
|
|
+ __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
|
|
|
+ else
|
|
|
+ __do_kernel_fault(mm, addr, fsr, regs);
|
|
|
+}
|
|
|
+
|
|
|
+#ifdef CONFIG_MMU
|
|
|
+#define VM_FAULT_BADMAP 0x010000
|
|
|
+#define VM_FAULT_BADACCESS 0x020000
|
|
|
+
|
|
|
+/*
|
|
|
+ * Check that the permissions on the VMA allow for the fault which occurred.
|
|
|
+ * If we encountered a write fault, we must have write permission, otherwise
|
|
|
+ * we allow any permission.
|
|
|
+ */
|
|
|
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
|
|
|
+{
|
|
|
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
|
|
|
+
|
|
|
+ if (fsr & FSR_WRITE)
|
|
|
+ mask = VM_WRITE;
|
|
|
+ if (fsr & FSR_LNX_PF)
|
|
|
+ mask = VM_EXEC;
|
|
|
+
|
|
|
+ return vma->vm_flags & mask ? false : true;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Resolve a fault at @addr against @mm's VMAs.  Returns the VM_FAULT_*
 * result of handle_mm_fault(), or one of the local VM_FAULT_BADMAP /
 * VM_FAULT_BADACCESS codes when no suitable mapping exists or the VMA
 * permissions forbid the access.  Caller holds mm->mmap_sem for read.
 */
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;	/* default: nothing maps addr */
	if (unlikely(!vma))
		goto out;
	/* addr below the VMA start: possibly a growable stack */
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;	/* mapped, wrong permissions */
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
|
|
|
+
|
|
|
+static int __kprobes
|
|
|
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|
|
+{
|
|
|
+ struct task_struct *tsk;
|
|
|
+ struct mm_struct *mm;
|
|
|
+ int fault, sig, code;
|
|
|
+ int write = fsr & FSR_WRITE;
|
|
|
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
|
|
|
+ (write ? FAULT_FLAG_WRITE : 0);
|
|
|
+
|
|
|
+ if (notify_page_fault(regs, fsr))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ tsk = current;
|
|
|
+ mm = tsk->mm;
|
|
|
+
|
|
|
+ /* Enable interrupts if they were enabled in the parent context. */
|
|
|
+ if (interrupts_enabled(regs))
|
|
|
+ local_irq_enable();
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If we're in an interrupt or have no user
|
|
|
+ * context, we must not take the fault..
|
|
|
+ */
|
|
|
+ if (in_atomic() || !mm)
|
|
|
+ goto no_context;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * As per x86, we may deadlock here. However, since the kernel only
|
|
|
+ * validly references user space from well defined areas of the code,
|
|
|
+ * we can bug out early if this is from code which shouldn't.
|
|
|
+ */
|
|
|
+ if (!down_read_trylock(&mm->mmap_sem)) {
|
|
|
+ if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
|
|
|
+ goto no_context;
|
|
|
+retry:
|
|
|
+ down_read(&mm->mmap_sem);
|
|
|
+ } else {
|
|
|
+ /*
|
|
|
+ * The above down_read_trylock() might have succeeded in
|
|
|
+ * which case, we'll have missed the might_sleep() from
|
|
|
+ * down_read()
|
|
|
+ */
|
|
|
+ might_sleep();
|
|
|
+#ifdef CONFIG_DEBUG_VM
|