@@ -330,3 +330,134 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	const char *type;
 
 	l2x0_base = base;
+	if (cache_id_part_number_from_dt)
+		cache_id = cache_id_part_number_from_dt;
+	else
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
+			& L2X0_CACHE_ID_PART_MASK;
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+	aux &= aux_mask;
+	aux |= aux_val;
+
+	/* Determine the number of ways */
+	switch (cache_id) {
+	case L2X0_CACHE_ID_PART_L310:
+		if (aux & (1 << 16))
+			ways = 16;
+		else
+			ways = 8;
+		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
+			outer_cache.set_debug = pl310_set_debug;
+		break;
+	case L2X0_CACHE_ID_PART_L210:
+		ways = (aux >> 13) & 0xf;
+		type = "L210";
+		break;
+
+	case AURORA_CACHE_ID:
+		sync_reg_offset = AURORA_SYNC_REG;
+		ways = (aux >> 13) & 0xf;
+		ways = 2 << ((ways + 1) >> 2);
+		way_size_shift = AURORA_WAY_SIZE_SHIFT;
+		type = "Aurora";
+		break;
+	default:
+		/* Assume unknown chips have 8 ways */
+		ways = 8;
+		type = "L2x0 series";
+		break;
+	}
+
+	l2x0_way_mask = (1 << ways) - 1;
+
+	/*
+	 * L2 cache Size = Way size * Number of ways
+	 */
+	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+	way_size = 1 << (way_size + way_size_shift);
+
+	l2x0_size = ways * way_size * SZ_1K;
+
+	/*
+	 * Check if l2x0 controller is already enabled.
+	 * If you are booting from non-secure mode
+	 * accessing the below registers will fault.
+	 */
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+		/* Make sure that I&D is not locked down when starting */
+		l2x0_unlock(cache_id);
+
+		/* l2x0 controller is disabled */
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+
+		l2x0_inv_all();
+
+		/* enable L2X0 */
+		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
+	}
+
+	/* Re-read it in case some bits are reserved. */
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+	/* Save the value for resuming. */
+	l2x0_saved_regs.aux_ctrl = aux;
+
+	if (!of_init) {
+		outer_cache.inv_range = l2x0_inv_range;
+		outer_cache.clean_range = l2x0_clean_range;
+		outer_cache.flush_range = l2x0_flush_range;
+		outer_cache.sync = l2x0_cache_sync;
+		outer_cache.flush_all = l2x0_flush_all;
+		outer_cache.inv_all = l2x0_inv_all;
+		outer_cache.disable = l2x0_disable;
+	}
+
+	printk(KERN_INFO "%s cache controller enabled\n", type);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			ways, cache_id, aux, l2x0_size);
+}
+
+#ifdef CONFIG_OF
+static int l2_wt_override;
+
+/*
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+	/*
+	 * Limit the number of cache lines processed at once,
+	 * since cache range operations stall the CPU pipeline
+	 * until completion.
+	 */
+	if (end > start + MAX_RANGE_SIZE)
+		end = start + MAX_RANGE_SIZE;
+
+	/*
+	 * Cache range operations can't straddle a page boundary.
+	 */
+	if (end > PAGE_ALIGN(start+1))
+		end = PAGE_ALIGN(start+1);
+
+	return end;
+}
+
+/*
+ * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
+ * and range operations only do a TLB lookup on the start address.
+ */
+static void aurora_pa_range(unsigned long start, unsigned long end,
+			unsigned long offset)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);