/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
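
/*
 * Sketch of how a caller might use ioremap_page(): map a single page of
 * a device into a virtual address the caller has already reserved.
 * reserved_virt and device_phys are hypothetical; MT_DEVICE is one
 * plausible memory type a caller could pass through get_mem_type().
 */
#if 0	/* illustrative only; not compiled */
	int err = ioremap_page(reserved_virt, device_phys,
			       get_mem_type(MT_DEVICE));
	if (err)
		pr_err("ioremap_page failed: %d\n", err);
#endif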

/*
 * Copy the kernel's mappings of the vmalloc area into this mm's pgd,
 * retrying until the sequence counter stops changing underneath us.
 */
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}
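
/*
 * Typical caller pattern for __check_vmalloc_seq(): compare the mm's
 * cached sequence number against init_mm's, and resynchronise the
 * vmalloc part of the pgd only when they differ - exactly as
 * unmap_area_sections() below does for the active mm.
 */
#if 0	/* illustrative only; not compiled */
	if (mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(mm);
#endif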

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
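/*
 * Worked example of the masking mentioned above: an SZ_1M request comes
 * back from get_vm_area_caller() with a one-page guard appended, so the
 * recorded size is 0x100000 + 0x1000.  Masking with ~(SZ_1M - 1) gives
 * 0x101000 & ~0xfffff == 0x100000, so the unmap loop stays within the
 * section-aligned region and never walks into the guard page.
 */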
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;	/* one Linux pmd covers two 1MB hardware sections */
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		/* fill both 1MB section entries of this Linux pmd */
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
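
/*
 * Sketch of what one loop iteration above lays down, assuming a
 * hypothetical 2MB mapping starting at pfn 0x40000 (phys 0x40000000):
 * pmd[0] maps 0x40000000-0x400fffff and pmd[1] maps
 * 0x40100000-0x401fffff - the two 1MB hardware sections that make up
 * one 2MB Linux pmd step on the 2-level ARM page table.
 */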

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
|