@@ -320,3 +320,81 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 	return arch_ioremap_caller(phys_addr, size, mtype,
 		__builtin_return_address(0));
 }
+EXPORT_SYMBOL(__arm_ioremap);
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory. This is needed for reprogramming source
+ * clocks that would affect normal memory for example. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+{
+	unsigned int mtype;
+
+	if (cached)
+		mtype = MT_MEMORY;
+	else
+		mtype = MT_MEMORY_NONCACHED;
+
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+			__builtin_return_address(0));
+}
+
+void __iounmap(volatile void __iomem *io_addr)
+{
+	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+	struct vm_struct *vm;
+
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (vm->addr > addr)
+			break;
+		if (!(vm->flags & VM_IOREMAP))
+			continue;
+		/* If this is a static mapping we must leave it alone */
+		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+			read_unlock(&vmlist_lock);
+			return;
+		}
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+		/*
+		 * If this is a section based mapping we need to handle it
+		 * specially as the VM subsystem does not know how to handle
+		 * such a beast.
+		 */
+		if ((vm->addr == addr) &&
+		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+			unmap_area_sections((unsigned long)vm->addr, vm->size);
+			break;
+		}
+#endif
+	}
+	read_unlock(&vmlist_lock);
+
+	vunmap(addr);
+}
+
+void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
+
+void __arm_iounmap(volatile void __iomem *io_addr)
+{
+	arch_iounmap(io_addr);
+}
+EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
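
For illustration, here is a minimal sketch of how a caller might use __arm_ioremap_exec() to stage and run code from on-chip SRAM, the use case the comment above describes (e.g. executing from SRAM while DRAM timings are reprogrammed). The SRAM base, size, and helper names below are hypothetical; a real platform would typically obtain the SRAM region via the genalloc API (CONFIG_GENERIC_ALLOCATOR) and copy the code with fncpy() from <asm/fncpy.h>, which also takes care of the Thumb bit in the returned function address:

#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

#define MY_SRAM_PHYS	0x40200000UL	/* hypothetical on-chip SRAM base */
#define MY_SRAM_SIZE	SZ_32K

typedef void (*sram_fn_t)(void);

/*
 * Copy 'len' bytes of position-independent code into SRAM and return
 * a callable pointer to it, or NULL if the mapping fails.
 */
static sram_fn_t stage_sram_code(const void *code, size_t len)
{
	void __iomem *base;

	/* cached=true yields an MT_MEMORY (normal, executable) mapping */
	base = __arm_ioremap_exec(MY_SRAM_PHYS, MY_SRAM_SIZE, true);
	if (!base)
		return NULL;

	memcpy((void __force *)base, code, len);
	/* make the freshly copied instructions visible to the I-cache */
	flush_icache_range((unsigned long)base,
			   (unsigned long)base + len);

	/* NB: a Thumb-2 kernel needs bit 0 set here; fncpy() handles that */
	return (sram_fn_t)(unsigned long)base;
}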
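A similarly hedged sketch for pci_ioremap_io(): it maps one SZ_64K chunk of a host bridge's physical I/O aperture at the given offset inside the fixed PCI_IO_VIRT_BASE window, after which the port accessors (inb()/outb() and friends) reach it via ordinary port numbers below IO_SPACE_LIMIT. The physical aperture address below is invented for the example:

#include <linux/io.h>
#include <linux/sizes.h>

#define MY_PCIE_IO_PHYS	0x90000000UL	/* hypothetical bus I/O aperture */

static int my_pcie_map_io(void)
{
	int ret;

	/* offset 0: this bridge owns I/O ports 0x0000-0xffff */
	ret = pci_ioremap_io(0, MY_PCIE_IO_PHYS);
	if (ret)
		return ret;

	/* port accesses now resolve into the window just mapped */
	outb(0xff, 0x80);	/* example write to a hypothetical port */
	return 0;
}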