@@ -326,3 +326,130 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
 #ifndef __ASSEMBLY__
+
+/*
+ * The FR451 can do execute protection by virtue of having separate TLB miss handlers for
+ * instruction access and for data access. However, we don't have enough reserved bits to say
+ * "execute only", so we don't bother. If you can read it, you can execute it and vice versa.
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
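These eight __Pxxx/__Sxxx entries populate the generic protection_map[]: bits 0-2 of the index are a mapping's VM_READ/VM_WRITE/VM_EXEC flags, and VM_SHARED selects the __Sxxx half. A rough sketch of the lookup done by the generic mm layer (illustration only, not part of this patch):

	/* Any combination with "execute" set maps to the same pgprot as its
	 * read-only/read-write equivalent, per the FR451 comment above. */
	pgprot_t prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];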
+/*
+ * Define this to warn about kernel memory accesses that are
+ * done without an 'access_ok(VERIFY_WRITE,..)'
+ */
+#undef TEST_ACCESS_OK
+
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_bad(x) (pmd_val(x) & xAMPRx_SS)
+#define pmd_clear(xp) do { __set_pmd(xp, 0); } while(0)
+
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#endif
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
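As a quick check of the arithmetic: pages_to_mb() divides a page count by the number of pages per megabyte. Assuming FRV's 16KB pages (PAGE_SHIFT == 14; an assumption here, see asm/page.h):

	/* pages_to_mb(x) == x >> (20 - 14) == x >> 6, i.e. 64 pages per MB,
	 * so pages_to_mb(640) == 10: 640 pages * 16KB == 10MB. */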
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
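Note that these helpers transform a pte_t value; nothing reaches the page tables until the result is stored back. The usual idiom, sketched with the generic set_pte_at() (illustration, not code from this patch):

	pte_t pte = *ptep;			/* snapshot the entry */
	pte = pte_mkyoung(pte_mkdirty(pte));	/* modify the copy */
	set_pte_at(mm, addr, ptep, pte);	/* publish the new value */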
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+ return i;
+}
+
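The dcf (data cache flush) instruction pushes the cache line holding the PTE back to memory, presumably so the TLB miss handlers, which walk the in-memory page tables, see the cleared bit. Page aging consumes the return value roughly like this (a sketch, not from this patch):

	/* Was the page touched since the last scan? If so, keep it active. */
	if (ptep_test_and_clear_young(vma, addr, ptep))
		referenced++;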
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ unsigned long x = xchg(&ptep->pte, 0);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+ return __pte(x);
+}
+
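Here the xchg() makes read-and-clear atomic, so a concurrent update to the entry (e.g. the dirty bit being set on another path) cannot be lost between the load and the store. Teardown code uses it roughly like this (illustrative; assumes the usual pte_page() helper):

	/* Clear the mapping without losing its dirty state. */
	pte_t old = ptep_get_and_clear(mm, addr, ptep);
	if (pte_dirty(old))
		set_page_dirty(pte_page(old));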
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ set_bit(_PAGE_BIT_WP, ptep);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+}
+
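This is the copy-on-write hook: at fork() time the generic code write-protects each writable entry in the parent so the first write from either process faults and copies the page. In outline (src_mm/src_ptep/dst_* are hypothetical names, not from this patch):

	ptep_set_wrprotect(src_mm, addr, src_ptep);	/* parent now faults on write */
	set_pte_at(dst_mm, addr, dst_ptep, *src_ptep);	/* child shares the read-only page */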
+/*
+ * Macro to mark a page protection value as "uncacheable"
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))
+
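A driver would typically apply this when handing device memory to userspace via mmap(). A hypothetical fragment (pfn and the surrounding mmap handler are assumed):

	/* Map the device pages uncached into the user's vma. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start, vma->vm_page_prot);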
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot)
+
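Putting the conversion macros together, a fault handler installs a freshly allocated page roughly like this (a sketch under the usual anonymous-fault conventions, not code from this patch):

	/* Build an entry for 'page' and install it writable and dirty. */
	pte_t entry = mk_pte(page, vma->vm_page_prot);
	entry = pte_mkdirty(pte_mkwrite(entry));
	set_pte_at(vma->vm_mm, addr, ptep, entry);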
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte &= _PAGE_CHG_MASK;
+ pte.pte |= pgprot_val(newprot);
+ return pte;
+}
+
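_PAGE_CHG_MASK is expected to keep the PFN and the accessed/dirty state, so pte_modify() replaces only the protection bits; mprotect()-style code relies on exactly that (illustrative sketch):

	/* Same page, new permissions. */
	pte_t pte = *ptep;
	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);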
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index(addr)
+
+/* Find an entry in the bottom-level page table.. */
+#define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+
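Chained together, the index and offset macros implement the software page-table walk. A minimal sketch for a kernel-address lookup (no locking shown; lookup_pte_kernel is a hypothetical name, and the pud level is the usual folded pass-through):

	static pte_t *lookup_pte_kernel(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}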
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)