@@ -473,3 +473,177 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
 
 	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
+	 */
+	for_each_bank(i, mi) {
+		struct membank *bank = &mi->bank[i];
+
+		bank_start = bank_pfn_start(bank);
+
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		bank_start = min(bank_start,
+				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
+#endif
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end < bank_start)
+			free_memmap(prev_bank_end, bank_start);
+
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+		free_memmap(prev_bank_end,
+			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
+}
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				totalhigh_pages += free_area(start, res_start,
+							     NULL);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			totalhigh_pages += free_area(start, end, NULL);
+	}
+	totalram_pages += totalhigh_pages;
+#endif
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+	unsigned long reserved_pages, free_pages;
+	struct memblock_region *reg;
+	int i;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
+#endif
+
+	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+	/* this will put all unused low memory onto the freelists */
+	free_unused_memmap(&meminfo);
+
+	totalram_pages += free_all_bootmem();
+
+#ifdef CONFIG_SA1111
+	/* now that our DMA memory is actually so designated, we can free it */
+	totalram_pages += free_area(PHYS_PFN_OFFSET,
+				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
+#endif
+
+	free_highpages();
+
+	reserved_pages = free_pages = 0;
+
+	for_each_bank(i, &meminfo) {
+		struct membank *bank = &meminfo.bank[i];
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = bank_pfn_start(bank);
+		pfn2 = bank_pfn_end(bank);
+
+		page = pfn_to_page(pfn1);
+		end = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			if (PageReserved(page))
+				reserved_pages++;
+			else if (!page_count(page))
+				free_pages++;
+			page++;
+		} while (page < end);
+	}
+
+	/*
+	 * Since our memory may not be contiguous, calculate the
+	 * real number of pages we have in this system
+	 */
+	printk(KERN_INFO "Memory:");
+	num_physpages = 0;
+	for_each_memblock(memory, reg) {
+		unsigned long pages = memblock_region_memory_end_pfn(reg) -
+			memblock_region_memory_base_pfn(reg);
+		num_physpages += pages;