/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
 
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"
 
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
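/*
 * Illustrative use only (the addresses below are hypothetical): a
 * platform could establish a single page of device registers at a
 * fixed virtual address with, e.g.,
 *
 *	ioremap_page(0xfef00000, 0x10000000, get_mem_type(MT_DEVICE));
 */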
 
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}
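/*
 * The loop above is effectively a lock-free retry: the kernel's PGD
 * entries covering the vmalloc region are copied into the given mm,
 * and if init_mm.context.vmalloc_seq moved on while the copy was in
 * flight, the copy is redone until a consistent snapshot is taken.
 */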
 
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
 
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
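/*
 * Note on the "pmdp += 2" stride above: on classic (non-LPAE) ARM, a
 * Linux PMD spans 2MB and is backed by two consecutive 1MB hardware
 * section descriptors, so each loop step covers both halves (and
 * pmd_clear() clears both together).
 */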
 
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
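/*
 * Worked example (assuming 4K pages): SZ_1M >> PAGE_SHIFT == 256, so
 * each 1MB section descriptor advances the pfn by 256 pages; one loop
 * iteration writes two sections and therefore covers PMD_SIZE (2MB).
 */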
 
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
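/*
 * A supersection descriptor must be replicated in all 16 consecutive
 * PMD slots covering the 16MB region, hence eight iterations writing
 * two entries each.  With 4K pages, (pfn >> (32 - PAGE_SHIFT)) & 0xf
 * extracts bits [35:32] of the physical address, which the
 * supersection format carries in descriptor bits [23:20] to allow
 * 36-bit physical addressing.
 */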
 
#endif
 
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;
 
	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);
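	/*
	 * Note on the reuse loop above: a static mapping is reused only
	 * when its memory type matches the request and the requested
	 * physical range [pfn, pfn + size) lies entirely inside the
	 * range the static mapping already covers; the returned pointer
	 * is then just an offset into the existing virtual window, and
	 * no new page tables are built.
	 */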
 
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(pfn);

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
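/*
 * Mapping granularity is chosen largest-first: 16MB supersections when
 * the CPU supports them, the physical address is above 4GB (pfn >=
 * 0x100000), and physical address, size and virtual address are all
 * supersection aligned; 1MB sections when everything is PMD aligned;
 * otherwise ordinary 4K page mappings via ioremap_page_range().
 */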
 
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
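/*
 * Worked example of the wraparound check, assuming a 32-bit unsigned
 * long: phys_addr = 0xfffff000 with size = 0x2000 gives last_addr =
 * 0x00000fff after the addition wraps, which is below phys_addr, so
 * the request is rejected.
 */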
 
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
 
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
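/*
 * arch_ioremap_caller is a function pointer so that a platform can
 * substitute its own ioremap implementation for the default one;
 * __builtin_return_address(0) records the caller so the mapping can
 * be attributed, e.g. in /proc/vmallocinfo.
 */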
 
 