@@ -491,3 +491,104 @@ static void __init build_mem_type_table(void)
 		pteval_t v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
 	}
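
The three context lines above close a loop that folds user_pgprot into all 16 entries of protection_map. That table is what vm_get_page_prot() indexes with the read/write/exec/shared bits of a VMA's flags, so this pass decides the baseline protections every user mapping starts from. A small userspace model of that 4-bit indexing (the flag and protection values below are made up; only the indexing scheme matches):

/*
 * Userspace model, not kernel code: a 16-entry protection map indexed by
 * the low read/write/exec/shared flag bits, with "user bits" folded into
 * every entry the way build_mem_type_table() does.
 */
#include <stdio.h>
#include <stdint.h>

/* stand-ins for VM_READ/VM_WRITE/VM_EXEC/VM_SHARED */
enum { R = 1, W = 2, X = 4, S = 8 };

int main(void)
{
	uint32_t map[16];
	uint32_t user_bits = 0x100;		/* stand-in for user_pgprot */

	for (int i = 0; i < 16; i++)
		map[i] = 0x10 + (uint32_t)i;	/* made-up base protections */

	/* the loop in the hunk above: fold the user bits into every entry */
	for (int i = 0; i < 16; i++)
		map[i] |= user_bits;

	printf("r-- private: 0x%x\n", map[R]);
	printf("rw- shared:  0x%x\n", map[R | W | S]);
	return 0;
}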
+
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
+
+	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
+	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | kern_pgprot);
+
+	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_ROM].prot_sect |= cp->pmd;
+
+	switch (cp->pmd) {
+	case PMD_SECT_WT:
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
+		break;
+	case PMD_SECT_WB:
+	case PMD_SECT_WBWA:
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+		break;
+	}
+	printk("Memory policy: ECC %sabled, Data cache %s\n",
+		ecc_mask ? "en" : "dis", cp->policy);
+
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		struct mem_type *t = &mem_types[i];
+		if (t->prot_l1)
+			t->prot_l1 |= PMD_DOMAIN(t->domain);
+		if (t->prot_sect)
+			t->prot_sect |= PMD_DOMAIN(t->domain);
+	}
+}
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
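
phys_mem_access_prot() is the hook that the /dev/mem mmap path consults on architectures that provide one, and the order of the two tests matters: a PFN that is not real RAM is always forced to an uncached mapping, while only valid RAM opened with O_SYNC gets relaxed to write-combining. A minimal userspace sketch of that precedence, using stand-in protection names rather than the kernel's pgprot types:

/*
 * Userspace model (not the kernel implementation) of the precedence in
 * the ARM phys_mem_access_prot() above: an invalid PFN wins over O_SYNC,
 * so non-RAM physical addresses always end up uncached, while O_SYNC on
 * real RAM only relaxes the mapping to write-combining.
 */
#include <stdio.h>
#include <stdbool.h>

enum fake_prot { FAKE_CACHED, FAKE_WRITECOMBINE, FAKE_NONCACHED };

static enum fake_prot pick_prot(bool pfn_is_ram, bool opened_o_sync)
{
	if (!pfn_is_ram)
		return FAKE_NONCACHED;		/* device registers, ROM, holes */
	if (opened_o_sync)
		return FAKE_WRITECOMBINE;
	return FAKE_CACHED;			/* normal RAM keeps the default */
}

int main(void)
{
	static const char *names[] = { "cached", "write-combine", "noncached" };

	printf("RAM,  no O_SYNC -> %s\n", names[pick_prot(true, false)]);
	printf("RAM,  O_SYNC    -> %s\n", names[pick_prot(true, true)]);
	printf("MMIO, O_SYNC    -> %s\n", names[pick_prot(false, true)]);
	return 0;
}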
+
+#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
+
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
+{
+	void *ptr = __va(memblock_alloc(sz, align));
+	memset(ptr, 0, sz);
+	return ptr;
+}
+
+static void __init *early_alloc(unsigned long sz)
+{
+	return early_alloc_aligned(sz, sz);
+}
+
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+		__pmd_populate(pmd, __pa(pte), prot);
+	}
+	BUG_ON(pmd_bad(*pmd));
+	return pte_offset_kernel(pmd, addr);
+}
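
early_pte_alloc() is a lazy two-level walk: only when the L1 slot (the pmd) is still empty does it allocate and zero a fresh L2 table (sized PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE so it can hold both the Linux and the hardware view of the PTEs), hook it into the L1 entry, and then return the L2 slot covering addr. A stripped-down userspace model of the same lazy-allocation pattern, with plain arrays and made-up sizes in place of the real descriptor formats:

/*
 * Userspace model of the lazy allocation in early_pte_alloc(): the
 * second-level table is created (zeroed) only when the first-level slot
 * is still empty, and the caller gets back a pointer to the slot that
 * covers 'addr'. Sizes and shifts are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_ENTRIES	16
#define L2_ENTRIES	256
#define PAGE_SHIFT	12
#define L1_SHIFT	(PAGE_SHIFT + 8)	/* 8 bits of L2 index per table */

typedef uint32_t fake_pte_t;

static fake_pte_t *l1_table[L1_ENTRIES];	/* first level: pointers to L2 tables */

static fake_pte_t *fake_pte_alloc(uint32_t addr)
{
	fake_pte_t **slot = &l1_table[(addr >> L1_SHIFT) % L1_ENTRIES];

	if (!*slot) {				/* the pmd_none() case */
		*slot = calloc(L2_ENTRIES, sizeof(fake_pte_t));
		if (!*slot)
			abort();		/* early boot has no graceful failure either */
	}
	return *slot + ((addr >> PAGE_SHIFT) % L2_ENTRIES);
}

int main(void)
{
	fake_pte_t *a = fake_pte_alloc(0x00123000);
	fake_pte_t *b = fake_pte_alloc(0x00124000);	/* next page, same L2 table */

	printf("second lookup reused the table, %d slot(s) apart\n", (int)(b - a));
	return 0;
}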
+
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
+{
+	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
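
The loop idiom in alloc_init_pte() advances three cursors in lock-step: the PTE slot pointer, the physical frame number and the virtual address, with the comma expression in the while() doing all the post-increment work. A self-contained model of the same walk over a plain array, assuming 4 KiB pages and fake 32-bit PTE values:

/*
 * Userspace model of the cursor walk in alloc_init_pte(): the PTE slot,
 * the page frame number and the virtual address advance together until
 * addr reaches end. 4 KiB pages and 32-bit "PTE" words are assumptions
 * made for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE	4096u
#define FAKE_PROT	0x3fu	/* stand-in for type->prot_pte */

static void fill_range(uint32_t *pte, uint32_t addr, uint32_t end, uint32_t pfn)
{
	do {
		*pte = (pfn << 12) | FAKE_PROT;	/* fake pfn_pte(): frame number plus prot bits */
		pfn++;
	} while (pte++, addr += FAKE_PAGE_SIZE, addr != end);
}

int main(void)
{
	uint32_t table[4] = { 0 };

	/* map four pages at VA 0xc0000000 onto PFNs starting at 0x60000 */
	fill_range(table, 0xc0000000u, 0xc0000000u + 4 * FAKE_PAGE_SIZE, 0x60000u);

	for (int i = 0; i < 4; i++)
		printf("slot %d -> 0x%08x\n", i, table[i]);
	return 0;
}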
+
+static void __init alloc_init_section(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
+				      const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	/*
+	 * Try a section mapping - end, addr and phys must all be aligned
+	 * to a section boundary.  Note that PMDs refer to the individual
+	 * L1 entries, whereas PGDs refer to a group of L1 entries making
+	 * up one logical pointer to an L2 table.
+	 */
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
+		pmd_t *p = pmd;
+
+#ifndef CONFIG_ARM_LPAE
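
The alignment test in alloc_init_section() is worth a second look: OR-ing addr, end and phys together and masking with ~SECTION_MASK yields zero only if every one of them sits on a section boundary (1 MiB with the classic short-descriptor format), so a single comparison decides whether the range can be mapped with sections instead of L2 page tables. A standalone check of the idiom, assuming the 1 MiB section size:

/*
 * Standalone demonstration (assuming the classic 1 MiB ARM section size)
 * of the alignment test in alloc_init_section(): OR-ing the three values
 * sets a low bit if any of them is misaligned, so masking with
 * ~SECTION_MASK is zero only when addr, end and phys are all aligned.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1u << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

static bool can_use_sections(uint32_t addr, uint32_t end, uint32_t phys)
{
	return ((addr | end | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
	/* fully aligned range: eligible for section mappings */
	printf("%d\n", can_use_sections(0xc0000000u, 0xc0200000u, 0x80000000u)); /* 1 */

	/* physical base off by one page: must fall back to PTE mappings */
	printf("%d\n", can_use_sections(0xc0000000u, 0xc0200000u, 0x80001000u)); /* 0 */
	return 0;
}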