|
@@ -109,3 +109,181 @@ extern unsigned long empty_zero_page[1024];
|
|
|
| _PAGE_DIRTY )
|
|
|
#define _KERNPG_TABLE \
|
|
|
( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
|
|
|
+ | _PAGE_DIRTY )
|
|
|
+#define _PAGE_CHG_MASK \
|
|
|
+ ( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
|
|
|
+
|
|
|
+#ifdef CONFIG_MMU
|
|
|
+#define PAGE_NONE \
|
|
|
+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
|
|
|
+#define PAGE_SHARED \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
|
|
|
+#define PAGE_SHARED_EXEC \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
|
|
|
+ | _PAGE_ACCESSED)
|
|
|
+#define PAGE_COPY \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
|
|
|
+#define PAGE_COPY_EXEC \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
|
|
|
+#define PAGE_READONLY \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
|
|
|
+#define PAGE_READONLY_EXEC \
|
|
|
+ __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
|
|
|
+
|
|
|
+#define __PAGE_KERNEL \
|
|
|
+ ( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
|
|
|
+ | _PAGE_ACCESSED )
|
|
|
+#define __PAGE_KERNEL_RO ( __PAGE_KERNEL & ~_PAGE_WRITE )
|
|
|
+#define __PAGE_KERNEL_NOCACHE ( __PAGE_KERNEL | _PAGE_NONCACHABLE)
|
|
|
+
|
|
|
+#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
|
|
|
+
|
|
|
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
|
|
|
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
|
|
|
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
|
|
|
+
|
|
|
+#else
|
|
|
+#define PAGE_NONE __pgprot(0)
|
|
|
+#define PAGE_SHARED __pgprot(0)
|
|
|
+#define PAGE_SHARED_EXEC __pgprot(0)
|
|
|
+#define PAGE_COPY __pgprot(0)
|
|
|
+#define PAGE_COPY_EXEC __pgprot(0)
|
|
|
+#define PAGE_READONLY __pgprot(0)
|
|
|
+#define PAGE_READONLY_EXEC __pgprot(0)
|
|
|
+
|
|
|
+#define PAGE_KERNEL __pgprot(0)
|
|
|
+#define PAGE_KERNEL_RO __pgprot(0)
|
|
|
+#define PAGE_KERNEL_NOCACHE __pgprot(0)
|
|
|
+#endif /* CONFIG_MMU */
|
|
|
+
|
|
|
+ /* xwr */
|
|
|
+#define __P000 PAGE_NONE
|
|
|
+#define __P001 PAGE_READONLY
|
|
|
+#define __P010 PAGE_COPY
|
|
|
+#define __P011 PAGE_COPY
|
|
|
+#define __P100 PAGE_READONLY_EXEC
|
|
|
+#define __P101 PAGE_READONLY_EXEC
|
|
|
+#define __P110 PAGE_COPY_EXEC
|
|
|
+#define __P111 PAGE_COPY_EXEC
|
|
|
+
|
|
|
+#define __S000 PAGE_NONE
|
|
|
+#define __S001 PAGE_READONLY
|
|
|
+#define __S010 PAGE_SHARED
|
|
|
+#define __S011 PAGE_SHARED
|
|
|
+#define __S100 PAGE_READONLY_EXEC
|
|
|
+#define __S101 PAGE_READONLY_EXEC
|
|
|
+#define __S110 PAGE_SHARED_EXEC
|
|
|
+#define __S111 PAGE_SHARED_EXEC
|
|
|
+
|
|
|
+/* page table for 0-4MB for everybody */
|
|
|
+
|
|
|
+#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
|
|
|
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
|
|
|
+
|
|
|
+#define pmd_none(x) (!pmd_val(x))
|
|
|
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
|
|
|
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
|
|
|
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)
|
|
|
+
|
|
|
+#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
|
|
|
+
|
|
|
+/*
|
|
|
+ * The following only work if pte_present() is true.
|
|
|
+ * Undefined behaviour if not..
|
|
|
+ */
|
|
|
+static inline int pte_dirty(pte_t pte)
|
|
|
+{
|
|
|
+ return pte_val(pte) & _PAGE_DIRTY;
|
|
|
+}
|
|
|
+
|
|
|
+static inline int pte_young(pte_t pte)
|
|
|
+{
|
|
|
+ return pte_val(pte) & _PAGE_ACCESSED;
|
|
|
+}
|
|
|
+
|
|
|
+static inline int pte_write(pte_t pte)
|
|
|
+{
|
|
|
+ return pte_val(pte) & _PAGE_WRITE;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * The following only works if pte_present() is not true.
|
|
|
+ */
|
|
|
+static inline int pte_file(pte_t pte)
|
|
|
+{
|
|
|
+ return pte_val(pte) & _PAGE_FILE;
|
|
|
+}
|
|
|
+
|
|
|
+static inline int pte_special(pte_t pte)
|
|
|
+{
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkclean(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) &= ~_PAGE_DIRTY;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkold(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) &= ~_PAGE_ACCESSED;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_wrprotect(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) &= ~_PAGE_WRITE;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkdirty(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) |= _PAGE_DIRTY;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkyoung(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) |= _PAGE_ACCESSED;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkwrite(pte_t pte)
|
|
|
+{
|
|
|
+ pte_val(pte) |= _PAGE_WRITE;
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t pte_mkspecial(pte_t pte)
|
|
|
+{
|
|
|
+ return pte;
|
|
|
+}
|
|
|
+
|
|
|
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
|
|
|
+{
|
|
|
+ return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
|
|
|
+{
|
|
|
+ clear_bit(_PAGE_BIT_WRITE, ptep);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Macro and implementation to make a page protection uncachable.
|
|
|
+ */
|
|
|
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
|
|
|
+{
|
|
|
+ unsigned long prot = pgprot_val(_prot);
|
|
|
+
|
|
|
+ prot |= _PAGE_NONCACHABLE;
|
|
|
+ return __pgprot(prot);
|
|
|
+}
|
|
|
+
|
|
|
+#define pgprot_writecombine(prot) pgprot_noncached(prot)
|
|
|
+
|
|
|
+/*
|
|
|
+ * Conversion functions: convert a page and protection to a page entry,
|
|
|
+ * and a page entry and page directory to the page they refer to.
|
|
|
+ */
|