@@ -363,3 +363,155 @@ static void __init build_mem_type_table(void)
 	/*
 	 * Mark the device areas according to the CPU/architecture.
 	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
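+		/*
+		 * CR_XP is the SCTLR.XP bit: it selects the ARMv6
+		 * page table format without subpages, which the XN
+		 * and TEX attribute bits below depend on.
+		 */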
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
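+			/*
+			 * With SCTLR.TRE set, TEX[2:1] are reclaimed for OS
+			 * use and the memory type comes from PRRR/NMRR,
+			 * indexed by TEX[0], C and B (the SXCB values below).
+			 */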
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
+	cp = &cache_policies[cachepolicy];
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
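+	/*
+	 * vecs_pgprot covers the vector page, kern_pgprot the kernel
+	 * mappings, and user_pgprot is folded into protection_map[]
+	 * further down.
+	 */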
+
+	/*
+	 * ARMv6 and above have extended page tables.
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Mark cache clean areas and XIP ROM read only
+		 * from SVC mode and no access from userspace.
+		 */
+		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
+
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
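+			/*
+			 * MT_MEMORY_DMA_READY is always mapped with
+			 * small pages (it has no prot_sect), so only
+			 * the PTE shared bit is set here.
+			 */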
+			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
+	}
+
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Do not generate access flag faults for the kernel mappings.
+	 */
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		mem_types[i].prot_pte |= PTE_EXT_AF;
+		if (mem_types[i].prot_sect)
+			mem_types[i].prot_sect |= PMD_SECT_AF;
+	}
+	kern_pgprot |= PTE_EXT_AF;
+	vecs_pgprot |= PTE_EXT_AF;
+#endif
+
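+	/*
+	 * Fold the user protection bits into all sixteen
+	 * read/write/exec/shared combinations in protection_map[],
+	 * which vm_get_page_prot() indexes by vm_flags.
+	 */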
+	for (i = 0; i < 16; i++) {
+		pteval_t v = pgprot_val(protection_map[i]);
+		protection_map[i] = __pgprot(v | user_pgprot);
+	}