/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif
/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * or empty_bad_page_table is returned to the higher layer code, so that
 * the failure is recognized later on. Linux does not seem to handle these
 * failures very well, though. The empty_bad_page_table has invalid pte
 * entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir. The
 * layout is identical to userspace except it's indexed with the fault
 * address - VMALLOC_START.
 */
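/*
 * As a rough worked illustration of the layout above (assuming the 4kB
 * page configuration defined further below, with 8-byte table entries):
 *
 *   page directory: 2 pages * 4kB / 8 bytes = 1024 pgd entries
 *   pmd table:      1 page  * 4kB / 8 bytes =  512 pmd entries
 *   page table:     1 page  * 4kB / 8 bytes =  512 pte entries
 */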
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
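/*
 * Worked example of the shifts, assuming the 4kB page configuration
 * below (PAGE_SHIFT == 12, PTE_ORDER == 0, PMD_ORDER == 0):
 *
 *   PMD_SHIFT   = 12 + (12 + 0 - 3) = 21, so PMD_SIZE   = 2MB
 *   PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30, so PGDIR_SIZE = 1GB
 *
 * i.e. each pmd entry maps a 2MB area and each pgd entry maps a 1GB area.
 */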
/*
 * For 4kB page size we use a 3 level page tree and an 8kB top-level page
 * directory, which lets us map 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space. Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space. We could add a third level, but there
 * seems to be no need for it at the moment.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
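/*
 * The arithmetic behind those totals (page offset bits plus index bits
 * per level, with 8-byte table entries):
 *
 *   4kB pages,  3 levels: 12 + 9  + 9  + 10 = 40 bits
 *   8kB pages,  3 levels: 13 + 10 + 10 + 10 = 43 bits (8TB)
 *   16kB pages, 2 levels: 14 + 11 + 11      = 36 bits
 *   64kB pages, 2 levels: 16 + 13 + 13      = 42 bits
 */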
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER 1
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER aieeee_attempt_to_allocate_pmd
#define PTE_ORDER 0
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
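/*
 * Illustrative sketch only (not part of the kernel API): how the shift
 * and PTRS_PER_* macros above carve a virtual address into table indexes
 * when the pmd level is not folded. The helper name is hypothetical.
 */
#ifndef __PAGETABLE_PMD_FOLDED
static inline void __example_split_address(unsigned long addr,
					    unsigned long *pgd_idx,
					    unsigned long *pmd_idx,
					    unsigned long *pte_idx)
{
	*pgd_idx = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);	/* top level */
	*pmd_idx = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);	/* middle level */
	*pte_idx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* leaf level */
}
#endif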
#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD (1)
#else
#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
#endif
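/*
 * For instance, assuming TASK_SIZE64 is 1TB (2^40), as in common MIPS64
 * configurations (the exact value is configuration dependent and assumed
 * here), the 4kB page setup gives PGDIR_SIZE == 1GB, so
 * USER_PTRS_PER_PGD == 2^40 / 2^30 == 1024, i.e. every pgd slot is
 * available for user mappings.
 */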