@@ -664,3 +664,78 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	}
 
 	/*
+	 * Shift bits [35:32] of address into bits [23:20] of PMD
+	 * (See ARMv6 spec).
+	 */
+	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		pud_t *pud = pud_offset(pgd, addr);
+		pmd_t *pmd = pmd_offset(pud, addr);
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+
+		addr += SUPERSECTION_SIZE;
+		phys += SUPERSECTION_SIZE;
+		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
+	} while (addr != end);
+}
+#endif /* !CONFIG_ARM_LPAE */
+
+/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+	unsigned long addr, length, end;
+	phys_addr_t phys;
+	const struct mem_type *type;
+	pgd_t *pgd;
+
+	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
+		       " at 0x%08lx in user region\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		return;
+	}
+
+	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+	    md->virtual >= PAGE_OFFSET &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+		       " at 0x%08lx out of vmalloc space\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+	}
+
+	type = &mem_types[md->type];
+
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * Catch 36-bit addresses
+	 */
+	if (md->pfn >= 0x100000) {
+		create_36bit_mapping(md, type);
+		return;
+	}
+#endif
+
+	addr = md->virtual & PAGE_MASK;
+	phys = __pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+
+	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
+		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
+		       "be mapped using pages, ignoring.\n",
+		       (long long)__pfn_to_phys(md->pfn), addr);
+		return;
+	}
+
+	pgd = pgd_offset_k(addr);
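
The subtlest line in the hunk above is the one that folds the upper physical address bits into the first-level descriptor: an ARM supersection maps 16MB, is marked by bit 18 of the section descriptor (PMD_SECT_SUPER), and carries PA[35:32] in descriptor bits [23:20], which is also why the loop writes 16 consecutive PMD entries per supersection. The fragment below is a standalone sketch of that bit arithmetic, not part of the patch; the EX_-prefixed constants and the sample PFN are assumptions made only so the example compiles on its own.

/*
 * Standalone sketch, not part of the patch: how a page frame number
 * above 4GiB is folded into an ARM supersection descriptor.  The
 * EX_-prefixed constants are local stand-ins for the kernel's
 * PAGE_SHIFT and PMD_SECT_SUPER; the sample PFN is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT		12		/* 4KiB pages, as on ARM */
#define EX_PMD_SECT_SUPER	(1u << 18)	/* supersection bit in a section descriptor */

int main(void)
{
	uint64_t pfn  = 0x301000;	/* PA = 0x3_0100_0000: 16MiB aligned, above 4GiB */
	uint32_t phys = (uint32_t)(pfn << EX_PAGE_SHIFT);	/* low 32 bits: 0x01000000 */

	/* Fold PA[35:32] into descriptor bits [23:20], as the patch does. */
	phys |= (uint32_t)(((pfn >> (32 - EX_PAGE_SHIFT)) & 0xF) << 20);

	/* Prints 0x01340000: PA[31:24] in bits [31:24], PA[35:32] in
	 * bits [23:20], plus the supersection type bit. */
	printf("0x%08x\n", phys | EX_PMD_SECT_SUPER);
	return 0;
}
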