|
@@ -259,3 +259,58 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|
|
*/
|
|
|
/*
 * Swap-entry field layout within a PTE: the swap type starts at bit
 * __SWP_TYPE_SHIFT and is __SWP_TYPE_BITS wide; the swap offset
 * occupies the bits above the type field (see __SWP_OFFSET_SHIFT).
 */
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 5
|
|
|
/* Mask to isolate the swap type once it has been shifted down. */
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
/* The swap offset is stored immediately above the type field. */
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

/* Decode the type and offset fields from an arch-independent swp_entry_t. */
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
/* Build a swp_entry_t from a (type, offset) pair. */
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

/*
 * A swapped-out PTE and a swp_entry_t use the same bit encoding, so
 * conversion in either direction is a plain value copy.
 */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
|
|
|
+
|
|
|
/*
 * Encode and decode a file entry. File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
/* A non-present PTE is a file entry when the L_PTE_FILE bit is set. */
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
/* The file offset occupies bits 3..31, matching the diagram above. */
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)

/* 32 PTE bits minus the 3 low flag bits leaves 29 bits of file offset. */
#define PTE_FILE_MAX_BITS 29
|
|
|
+
|
|
|
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
/* Treats every kernel virtual address as valid — no page-table walk. */
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>
|
|
|
+
|
|
|
/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 * These defines tell the core VM not to use the generic versions for
 * either the bottom-up or the top-down mmap layout.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
|
|
|
+
|
|
|
/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 *
 * I/O mappings need no special treatment here, so this simply forwards
 * to remap_pfn_range().
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

/* No arch-private page-table caches to set up. */
#define pgtable_cache_init() do { } while (0)
|
|
|
+
|
|
|
+#endif /* !__ASSEMBLY__ */
|
|
|
+
|
|
|
+#endif /* CONFIG_MMU */
|