#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init()	do { } while (0)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;) */
#define VMALLOC_START		KSEG2
#define VMALLOC_END		KSEG3

/*
 *     M32R TLB format
 *
 *     [0]    [1:19]           [20:23]       [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0         PPN          |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 *                                     RWX
 */

#define _PAGE_BIT_DIRTY		0	/* software: page changed */
#define _PAGE_BIT_FILE		0	/* when !present: nonlinear file
					   mapping */
#define _PAGE_BIT_PRESENT	1	/* Valid: page is valid */
#define _PAGE_BIT_GLOBAL	2	/* Global */
#define _PAGE_BIT_LARGE		3	/* Large */
#define _PAGE_BIT_EXEC		4	/* Execute */
#define _PAGE_BIT_WRITE		5	/* Write */
#define _PAGE_BIT_READ		6	/* Read */
#define _PAGE_BIT_NONCACHABLE	7	/* Non cachable */
#define _PAGE_BIT_ACCESSED	8	/* software: page referenced */
#define _PAGE_BIT_PROTNONE	9	/* software: if not present */

#define _PAGE_DIRTY		(1UL << _PAGE_BIT_DIRTY)
#define _PAGE_FILE		(1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT		(1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL		(1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE		(1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC		(1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE		(1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ		(1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE	(1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED		(1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE		(1UL << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _KERNPG_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
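
/*
 * How these bits are typically combined (a minimal sketch, assuming the
 * usual pgprot_t / __pgprot() wrappers from <asm/page.h>): a shared,
 * readable and writable page carries the hardware access bits plus the
 * software "referenced" bit, e.g.
 *
 *	pgprot_t prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE
 *				 | _PAGE_ACCESSED);
 *
 * _PAGE_READ, _PAGE_WRITE and _PAGE_EXEC correspond to the RWX field of
 * the TLB entry pictured above, while _PAGE_DIRTY, _PAGE_ACCESSED and
 * _PAGE_PROTNONE are software-only bits maintained by the fault handling
 * code.
 */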