@@ -64,3 +64,100 @@
  *    flush_user_range(start, end, flags)
  *
  *        Clean and invalidate a range of cache entries in the
+ *        specified address space before a change of page tables.
+ *        - start - user start address (inclusive, page aligned)
+ *        - end - user end address (exclusive, page aligned)
+ *        - flags - vma->vm_flags field
+ *
+ *    coherent_kern_range(start, end)
+ *
+ *        Ensure coherency between the Icache and the Dcache in the
+ *        region described by start, end. If you have non-snooping
+ *        Harvard caches, you need to implement this function.
+ *        - start - virtual start address
+ *        - end - virtual end address
+ *
+ *    coherent_user_range(start, end)
+ *
+ *        Ensure coherency between the Icache and the Dcache in the
+ *        region described by start, end. If you have non-snooping
+ *        Harvard caches, you need to implement this function.
+ *        - start - virtual start address
+ *        - end - virtual end address
+ *
+ *    flush_kern_dcache_area(kaddr, size)
+ *
+ *        Ensure that the data held in page is written back.
+ *        - kaddr - page address
+ *        - size - region size
+ *
+ *    DMA Cache Coherency
+ *    ===================
+ *
+ *    dma_flush_range(start, end)
+ *
+ *        Clean and invalidate the specified virtual address range.
+ *        - start - virtual start address
+ *        - end - virtual end address
+ */
+
+struct cpu_cache_fns {
+        void (*flush_icache_all)(void);
+        void (*flush_kern_all)(void);
+        void (*flush_kern_louis)(void);
+        void (*flush_user_all)(void);
+        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
+
+        void (*coherent_kern_range)(unsigned long, unsigned long);
+        int (*coherent_user_range)(unsigned long, unsigned long);
+        void (*flush_kern_dcache_area)(void *, size_t);
+
+        void (*dma_map_area)(const void *, size_t, int);
+        void (*dma_unmap_area)(const void *, size_t, int);
+
+        void (*dma_flush_range)(const void *, const void *);
+};
+
+/*
|
|
|
|
+ * Select the calling method
|
|
|
|
+ */
|
|
|
|
+#ifdef MULTI_CACHE
|
|
|
|
+
|
|
|
|
+extern struct cpu_cache_fns cpu_cache;
|
|
|
|
+
|
|
|
|
+#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
|
|
|
|
+#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
|
|
|
|
+#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
|
|
|
|
+#define __cpuc_flush_user_all cpu_cache.flush_user_all
|
|
|
|
+#define __cpuc_flush_user_range cpu_cache.flush_user_range
|
|
|
|
+#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
|
|
|
|
+#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
|
|
|
|
+#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * These are private to the dma-mapping API. Do not use directly.
|
|
|
|
+ * Their sole purpose is to ensure that data held in the cache
|
|
|
|
+ * is visible to DMA, or data written by DMA to system memory is
|
|
|
|
+ * visible to the CPU.
|
|
|
|
+ */
|
|
|
|
+#define dmac_map_area cpu_cache.dma_map_area
|
|
|
|
+#define dmac_unmap_area cpu_cache.dma_unmap_area
|
|
|
|
+#define dmac_flush_range cpu_cache.dma_flush_range
|
|
|
|
+
|
|
|
|
+#else
|
|
|
|
+
|
|
|
|
+extern void __cpuc_flush_icache_all(void);
|
|
|
|
+extern void __cpuc_flush_kern_all(void);
|
|
|
|
+extern void __cpuc_flush_kern_louis(void);
|
|
|
|
+extern void __cpuc_flush_user_all(void);
|
|
|
|
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
|
|
|
|
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
|
|
|
|
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
|
|
|
|
+extern void __cpuc_flush_dcache_area(void *, size_t);
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * These are private to the dma-mapping API. Do not use directly.
|
|
|
|
+ * Their sole purpose is to ensure that data held in the cache
|
|
|
|
+ * is visible to DMA, or data written by DMA to system memory is
|
|
|
|
+ * visible to the CPU.
|
|
|
|
+ */
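
The hunk above is only the declaration side of the indirection: with MULTI_CACHE each supported cache type provides its own struct cpu_cache_fns, one of which is selected at boot and exposed as cpu_cache, while single-cache builds resolve the __cpuc_* names directly to extern functions. As a rough illustration of the shape of such a table, here is a minimal C sketch; the mycpu_* names are invented for this example, and the kernel builds its real tables in the per-processor support code rather than like this.

/* Illustrative sketch only -- not part of the patch. Assumes the header
 * above (asm/cacheflush.h) and a hypothetical CPU backend "mycpu". */
#include <asm/cacheflush.h>

/* Low-level routines a hypothetical backend would provide. */
extern void mycpu_flush_icache_all(void);
extern void mycpu_flush_kern_cache_all(void);
extern void mycpu_flush_kern_cache_louis(void);
extern void mycpu_flush_user_cache_all(void);
extern void mycpu_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
extern void mycpu_coherent_kern_range(unsigned long, unsigned long);
extern int mycpu_coherent_user_range(unsigned long, unsigned long);
extern void mycpu_flush_kern_dcache_area(void *, size_t);
extern void mycpu_dma_map_area(const void *, size_t, int);
extern void mycpu_dma_unmap_area(const void *, size_t, int);
extern void mycpu_dma_flush_range(const void *, const void *);

/* One table per cache type; under MULTI_CACHE the chosen table backs the
 * cpu_cache instance that the __cpuc_* and dmac_* macros dereference. */
struct cpu_cache_fns mycpu_cache_fns = {
        .flush_icache_all       = mycpu_flush_icache_all,
        .flush_kern_all         = mycpu_flush_kern_cache_all,
        .flush_kern_louis       = mycpu_flush_kern_cache_louis,
        .flush_user_all         = mycpu_flush_user_cache_all,
        .flush_user_range       = mycpu_flush_user_cache_range,
        .coherent_kern_range    = mycpu_coherent_kern_range,
        .coherent_user_range    = mycpu_coherent_user_range,
        .flush_kern_dcache_area = mycpu_flush_kern_dcache_area,
        .dma_map_area           = mycpu_dma_map_area,
        .dma_unmap_area         = mycpu_dma_unmap_area,
        .dma_flush_range        = mycpu_dma_flush_range,
};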
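
Whichever branch of the #ifdef is compiled in, callers always go through the same __cpuc_* names, so generic code does not care whether they expand to cpu_cache members or to direct extern functions. A hypothetical caller-side sketch (the helper name sync_user_code_range is invented here, not something the patch adds):

#include <asm/cacheflush.h>

/* Illustrative only: after the CPU has written instructions into a user
 * mapping (e.g. when installing a breakpoint), write the Dcache back and
 * invalidate the Icache for that range so instruction fetch sees the new
 * code. The return value is whatever status the backend reports. */
static int sync_user_code_range(unsigned long start, unsigned long end)
{
        return __cpuc_coherent_user_range(start, end);
}

The dmac_map_area, dmac_unmap_area and dmac_flush_range names, by contrast, are reserved for the dma-mapping implementation, as the comment in the hunk spells out; drivers are expected to go through the dma-mapping API rather than call them directly.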