@@ -739,3 +739,143 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		unsigned long next = pgd_addr_end(addr, end);
+
+		alloc_init_pud(pgd, addr, next, phys, type);
+
+		phys += next - addr;
+		addr = next;
+	} while (pgd++, addr != end);
+}
+
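The loop above is the kernel's standard page-table walk idiom: pgd_addr_end() clamps each step to the next top-level (PGD) boundary, or to end if that comes first, so alloc_init_pud() only ever receives a range that fits within a single top-level entry. Below is a minimal user-space sketch of that chunking arithmetic, assuming the classic 2-level ARM value of 2 MB per top-level entry; the addresses are made up.

#include <stdio.h>

#define PGDIR_SIZE	0x200000UL		/* assumed: 2 MB per top-level entry */
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Same arithmetic as the kernel's pgd_addr_end(): next boundary, or 'end'. */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xf8100000UL, end = 0xf8500000UL;

	do {
		unsigned long next = pgd_addr_end(addr, end);
		printf("map 0x%08lx - 0x%08lx within one top-level entry\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}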
+/*
+ * Create the architecture specific mappings
+ */
+void __init iotable_init(struct map_desc *io_desc, int nr)
+{
+	struct map_desc *md;
+	struct vm_struct *vm;
+
+	if (!nr)
+		return;
+
+	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+	for (md = io_desc; nr; md++, nr--) {
+		create_mapping(md);
+		vm->addr = (void *)(md->virtual & PAGE_MASK);
+		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(md->type);
+		vm->caller = iotable_init;
+		vm_area_add_early(vm++);
+	}
+}
+
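iotable_init() is what a platform's map_io hook feeds with its table of fixed device mappings; besides creating the mappings it now also registers each one as a vm_struct, so later ioremap()/vmalloc() calls stay out of that virtual range. A hedged sketch of a caller follows; the board name, virtual address and physical base are invented for illustration.

/* Hypothetical board support code; VA, PA and names are illustrative only. */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* assumed fixed VA for the peripherals */
		.pfn		= __phys_to_pfn(0x10000000),	/* assumed peripheral physical base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}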
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h). However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings. This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
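The test (addr & ~PMD_MASK) == SECTION_SIZE is easiest to see with numbers. Assuming the classic 2-level values (SECTION_SIZE = 1 MB, PMD_SIZE = 2 MB), a static mapping that ends at 0xf8100000 has used only the bottom section of its 2 MB PMD, so fill_pmd_gaps() blocks the unused top megabyte with a dummy vm entry. A tiny stand-alone check of that arithmetic, with a made-up end address:

#include <assert.h>

#define SECTION_SIZE	0x00100000UL	/* 1 MB section (assumed 2-level value) */
#define PMD_SIZE	0x00200000UL	/* 2 MB Linux PMD */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long end = 0xf8100000UL;	/* hypothetical end of a static mapping */

	/* Ends on an odd 1 MB boundary: the upper half of this PMD is unused,
	 * so a dummy entry covering [0xf8100000, 0xf8200000) would be added. */
	assert((end & ~PMD_MASK) == SECTION_SIZE);
	return 0;
}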
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
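pci_reserve_io() keeps the fixed 2 MB window at PCI_IO_VIRT_BASE out of the early vmalloc area unless a platform's static mappings already cover it; PCI host code can later remap port space into that window. As a rough, hedged illustration, assuming the arch's pci_ioremap_io() helper is available, a host bridge setup might look like this; the offset and CPU-side address are invented.

/* Hypothetical host-controller init; 0x40000000 is an assumed CPU address
 * of the bridge's port-I/O aperture, mapped at offset 0 of the window. */
static int __init board_pci_map_io(void)
{
	return pci_ioremap_io(0, 0x40000000);
}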
+#ifdef CONFIG_DEBUG_LL
+void __init debug_ll_io_init(void)
+{