/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif
 
/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher-layer code, so that
 * the failure is recognized later on. Linux does not seem to handle
 * these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir. The
 * layout is identical to userspace except it's indexed with the fault
 * address - VMALLOC_START.
 */
 
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
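
/*
 * A worked example of the arithmetic above (illustration only): with a
 * 4kB page size, PAGE_SHIFT == 12 and PTE_ORDER == PMD_ORDER == 0, so
 *
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21	(PMD_SIZE   = 2MB)
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30	(PGDIR_SIZE = 1GB)
 *
 * The "- 3" term reflects the 2^3 = 8 bytes occupied by each table
 * entry: one table page of 2^PAGE_SHIFT bytes holds 2^(PAGE_SHIFT - 3)
 * pointers to the next level.
 */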
 
/*
 * For 4kB page size we use a 3 level page tree and an 8kB (order 1) pgd,
 * which permits us to map 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB and 32kB page sizes we likewise use a 3 level page tree, giving
 * 47 and 51 bits of virtual address space respectively.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
 
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#define PTE_ORDER		0
#endif
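
/*
 * Note that PUD_ORDER (and, for 64kB pages, PMD_ORDER) deliberately
 * expands to an undeclared identifier: those levels are folded away by
 * the asm-generic headers included above, so any code that accidentally
 * references the order of a folded level fails to build instead of
 * silently allocating a bogus table.
 */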
 
#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
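
/*
 * Continuing the 4kB worked example (PGD_ORDER == 1, 8 byte entries):
 *
 *	PTRS_PER_PGD = (4096 << 1) / 8 = 1024
 *	PTRS_PER_PMD =  4096 / 8       =  512
 *	PTRS_PER_PTE =  4096 / 8       =  512
 *
 * for 1024 * 512 * 512 * 4096 = 2^40 bytes of mappable virtual address
 * space, matching the 40 bit figure quoted earlier.
 */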
 
#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD	(1)
#else
#define USER_PTRS_PER_PGD	(TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS	0UL
 
/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
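
/*
 * VMALLOC_END is thus bounded both by what the page tables can map and
 * by the number of virtual address bits the CPU actually implements
 * (cpu_vmbits), whichever is smaller; the trailing (1UL << 32)
 * subtraction backs the end off from the very top of the segment
 * (presumably so later carve-outs such as the fixmap stay clear of it).
 */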
 
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
 
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif
 
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
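
/*
 * A minimal usage sketch (illustrative, not part of the original header),
 * assuming the generic pmd_offset()/pte_offset_kernel() walkers; it mirrors
 * the usual none/bad check pattern before descending to the pte level:
 *
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	if (pmd_none(*pmdp))		// still points at invalid_pte_table
 *		return NULL;		// nothing mapped here yet
 *	if (pmd_bad(*pmdp)) {		// stray low bits: not a table pointer
 *		pmd_ERROR(*pmdp);	// report the corrupt entry, then
 *		pmd_clear(pmdp);	// reset it to invalid_pte_table
 *		return NULL;
 *	}
 *	return pte_offset_kernel(pmdp, addr);
 */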
 
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}