@@ -119,3 +119,93 @@
 #else
 #define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
 #endif
+#define FIRST_USER_ADDRESS 0UL
+
+/*
+ * TLB refill handlers also map the vmalloc area into xuseg. Avoid
+ * the first couple of pages so NULL pointer dereferences will still
+ * reliably trap.
+ */
+#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
+#define VMALLOC_END \
+	(MAP_BASE + \
+	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
+	     (1UL << cpu_vmbits)) - (1UL << 32))
+
+#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
+	VMALLOC_START != CKSSEG
+/* Load modules into 32bit-compatible segment. */
+#define MODULE_START CKSSEG
+#define MODULE_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/*
+ * For 3-level pagetables we defines these ourselves, for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+#endif
+
+/*
+ * Empty pgd/pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
+}
+#ifndef __PAGETABLE_PMD_FOLDED
+
+/*
+ * Empty pud entries point to the invalid_pmd_table.
+ */
+static inline int pud_none(pud_t pud)
+{
+	return pud_val(pud) == (unsigned long) invalid_pmd_table;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+	return pud_val(pud) & ~PAGE_MASK;