@@ -1079,3 +1079,171 @@ static inline void prepare_page_table(void)
 #endif
 
 /*
+ * Reserve the special regions of memory
+ */
+void __init arm_mm_memblock_reserve(void)
+{
+	/*
+	 * Reserve the page tables. These are already in use,
+	 * and can only be in node 0.
+	 */
+	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
+
+#ifdef CONFIG_SA1111
+	/*
+	 * Because of the SA1111 DMA bug, we want to preserve our
+	 * precious DMA-able memory...
+	 */
+	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
+#endif
+}
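
memblock_reserve() takes a physical base address and a size and marks that range as allocated, so the boot-time allocator can never hand out the live first-level page table. For reference, a minimal sketch of the span being protected, assuming a classic (non-LPAE) ARM build; the define below is how arch/arm/mm/mmu.c of this era spells it, and head.S places swapper_pg_dir 0x4000 bytes below the kernel entry point:

/* Reference sketch, not part of the patch (classic non-LPAE assumed):
 * PTRS_PER_PGD = 2048 entries * 8 bytes = the 16 KiB first-level table. */
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

/* So the call above reserves:
 *   [__pa(swapper_pg_dir), __pa(swapper_pg_dir) + 16 KiB)  */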
+
+/*
+ * Set up the device mappings. Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function. This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+ */
+static void __init devicemaps_init(struct machine_desc *mdesc)
+{
+	struct map_desc map;
+	unsigned long addr;
+	void *vectors;
+
+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = early_alloc(PAGE_SIZE);
+
+	early_trap_init(vectors);
+
+	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Map the cache flushing regions.
+	 */
+#ifdef FLUSH_BASE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+	map.virtual = FLUSH_BASE;
+	map.length = SZ_1M;
+	map.type = MT_CACHECLEAN;
+	create_mapping(&map);
+#endif
+#ifdef FLUSH_BASE_MINICACHE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+	map.virtual = FLUSH_BASE_MINICACHE;
+	map.length = SZ_1M;
+	map.type = MT_MINICLEAN;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Create a mapping for the machine vectors at the high-vectors
+	 * location (0xffff0000). If we aren't using high-vectors, also
+	 * create a mapping at the low-vectors virtual address.
+	 */
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.virtual = 0xffff0000;
+	map.length = PAGE_SIZE;
+	map.type = MT_HIGH_VECTORS;
+	create_mapping(&map);
+
+	if (!vectors_high()) {
+		map.virtual = 0;
+		map.type = MT_LOW_VECTORS;
+		create_mapping(&map);
+	}
+
+	/*
+	 * Ask the machine support to map in the statically mapped devices.
+	 */
+	if (mdesc->map_io)
+		mdesc->map_io();
+	fill_pmd_gaps();
+
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
+	/*
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer. This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back. After this point, we can start to touch devices again.
+	 */
+	local_flush_tlb_all();
+	flush_cache_all();
+}
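
The mdesc->map_io hook is where each board registers its statically mapped devices; board code typically fills an array of struct map_desc and hands it to iotable_init(), which feeds the same create_mapping() path used throughout this patch. A minimal sketch of such a hook (the device, addresses, and names are invented for illustration; iotable_init(), struct map_desc, and MT_DEVICE are the real interfaces):

/* Hypothetical board code: map one 4 KiB UART register window. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000UL,			/* invented VA */
		.pfn		= __phys_to_pfn(0x10000000),	/* invented PA */
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}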
+
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+		PKMAP_BASE, _PAGE_KERNEL_TABLE);
+#endif
+}
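
kmap_init() pre-allocates the single PTE table backing the persistent-kmap window, so later kmap() calls never need to allocate memory. For reference, assuming the highmem layout of kernels from this era (arch/arm/include/asm/highmem.h), the window is exactly one section just below lowmem, which is why a single table covers every slot:

/* Reference, not part of the patch: the pkmap window is the 2 MiB
 * section immediately below PAGE_OFFSET, so one PTE table spans it. */
#define PKMAP_BASE	(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP	PTRS_PER_PTE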
+
+static void __init map_lowmem(void)
+{
+	struct memblock_region *reg;
+
+	/* Map all the lowmem memory banks. */
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
+
+		create_mapping(&map);
+	}
+}
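
The clamp against arm_lowmem_limit is what splits each bank into a directly mapped part and a highmem remainder. A standalone sketch of the arithmetic with invented numbers (a 1 GiB bank at 0x80000000 against a limit of 0xB0000000):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x80000000ULL;		/* reg->base */
	uint64_t end = start + (1ULL << 30);	/* reg->base + reg->size */
	uint64_t arm_lowmem_limit = 0xB0000000ULL;

	if (end > arm_lowmem_limit)
		end = arm_lowmem_limit;		/* clamp to lowmem */

	/* prints 0x30000000: 768 MiB get the MT_MEMORY linear mapping,
	 * the remaining 256 MiB of the bank are left to highmem */
	printf("linear map length: %#llx\n",
	       (unsigned long long)(end - start));
	return 0;
}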
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct machine_desc *mdesc)
+{
+	void *zero_page;
+
+	memblock_set_current_limit(arm_lowmem_limit);
+
+	build_mem_type_table();
+	prepare_page_table();
+	map_lowmem();
+	dma_contiguous_remap();
+	devicemaps_init(mdesc);
+	kmap_init();
+
+	top_pmd = pmd_off_k(0xffff0000);
+
+	/* allocate the zero page. */
+	zero_page = early_alloc(PAGE_SIZE);
+
+	bootmem_init();
+
+	empty_zero_page = virt_to_page(zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
+}
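
paging_init() is the entry point for everything above. For orientation, a call-graph sketch of where it sits during boot, abridged from arch/arm/kernel/setup.c of this era (exact neighbours vary between kernel versions):

/*
 * setup_arch()                    arch/arm/kernel/setup.c
 *     sanity_check_meminfo()      computes arm_lowmem_limit
 *     arm_memblock_init()         calls arm_mm_memblock_reserve() above
 *     paging_init(mdesc)          the function above, which in turn runs
 *         map_lowmem(), devicemaps_init(), kmap_init(), bootmem_init()
 */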