
efHeterogeneousSynchronization dataSynchronizationMemory.h 沈瑞清 commit at 2020-09-11

沈瑞清 4 years ago
parent
commit
efcdf083f3

+ 175 - 0
efHeterogeneousSynchronization/databaseOperation/dataSynchronizationMemory.h

@@ -10,3 +10,178 @@
 #include <asm/machvec.h>
 #include <asm/hwrpb.h>
 
+/* The generic header contains only prototypes.  Including it ensures that
+   the implementation we have here matches that interface.  */
+#include <asm-generic/iomap.h>
+
+/* We don't use IO slowdowns on the Alpha, but... */
+#define __SLOW_DOWN_IO	do { } while (0)
+#define SLOW_DOWN_IO	do { } while (0)
+
+/*
+ * Virtual -> physical identity mapping starts at this offset
+ */
+#ifdef USE_48_BIT_KSEG
+#define IDENT_ADDR     0xffff800000000000UL
+#else
+#define IDENT_ADDR     0xfffffc0000000000UL
+#endif
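+
+/*
+ * Illustrative example (assuming the default 43-bit KSEG base; not
+ * part of the original interface): physical address 0x1000 is
+ * reachable at the fixed kernel virtual address
+ *
+ *	IDENT_ADDR + 0x1000 == 0xfffffc0000001000UL
+ *
+ * i.e. the mapping is a constant offset, not a page-table walk.
+ */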
+
+/*
+ * We try to avoid hae updates (thus the cache), but when we
+ * do need to update the hae, we must do it atomically, so
+ * that an interrupt never observes the cached hae value out
+ * of date with respect to the hardware register.
+ */
+extern inline void __set_hae(unsigned long new_hae)
+{
+	unsigned long flags = swpipl(IPL_MAX);
+
+	barrier();
+
+	alpha_mv.hae_cache = new_hae;
+	*alpha_mv.hae_register = new_hae;
+	mb();
+	/* Re-read to make sure it was written.  */
+	new_hae = *alpha_mv.hae_register;
+
+	setipl(flags);
+	barrier();
+}
+
+extern inline void set_hae(unsigned long new_hae)
+{
+	if (new_hae != alpha_mv.hae_cache)
+		__set_hae(new_hae);
+}
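+
+/*
+ * Usage sketch (illustrative; 'window_base' is a hypothetical value
+ * computed by chipset code before touching sparse I/O space):
+ *
+ *	set_hae(window_base);
+ *
+ * Thanks to the cache check above, repeated accesses within the same
+ * HAE window never pay the IPL-raising cost of __set_hae().
+ */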
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ */
+#ifdef USE_48_BIT_KSEG
+static inline unsigned long virt_to_phys(void *address)
+{
+	return (unsigned long)address - IDENT_ADDR;
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+	return (void *) (address + IDENT_ADDR);
+}
+#else
+static inline unsigned long virt_to_phys(void *address)
+{
+	unsigned long phys = (unsigned long)address;
+
+	/* Sign-extend from bit 41.  */
+	phys <<= (64 - 41);
+	phys = (long)phys >> (64 - 41);
+
+	/* Crop to the physical address width of the processor.  */
+	phys &= (1ul << hwrpb->pa_bits) - 1;
+
+	return phys;
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
+}
+#endif
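+
+/*
+ * Illustrative round trip (assumes 'buf' is a direct-mapped KSEG
+ * kernel address; not part of the original interface):
+ *
+ *	unsigned long pa = virt_to_phys(buf);
+ *	void *va = phys_to_virt(pa);
+ *
+ * after which va == buf.
+ */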
+
+#define page_to_phys(page)	page_to_pa(page)
+
+static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
+{
+	return page_to_phys(page);
+}
+
+/* The maximum PIO space address supported.  */
+#define IO_SPACE_LIMIT 0xffff
+
+/*
+ * Change addresses as seen by the kernel (virtual) to addresses as
+ * seen by a device (bus), and vice versa.
+ *
+ * Note that this only works for a limited range of kernel addresses,
+ * and very well may not span all memory.  Consider this interface 
+ * deprecated in favour of the DMA-mapping API.
+ */
+extern unsigned long __direct_map_base;
+extern unsigned long __direct_map_size;
+
+static inline unsigned long __deprecated virt_to_bus(void *address)
+{
+	unsigned long phys = virt_to_phys(address);
+	unsigned long bus = phys + __direct_map_base;
+	return phys <= __direct_map_size ? bus : 0;
+}
+#define isa_virt_to_bus virt_to_bus
+
+static inline void * __deprecated bus_to_virt(unsigned long address)
+{
+	void *virt;
+
+	/* This is a sanity check, but it also ensures that bus address 0
+	   maps to virtual address 0, which is useful for detecting null
+	   pointers (the NCR driver is much simpler if NULL pointers are
+	   preserved).  */
+	address -= __direct_map_base;
+	virt = phys_to_virt(address);
+	return (long)address <= 0 ? NULL : virt;
+}
+#define isa_bus_to_virt bus_to_virt
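+
+/*
+ * Preferred-replacement sketch: the generic DMA-mapping API should be
+ * used instead of the helpers above (illustrative; 'dev', 'buf' and
+ * 'len' are hypothetical caller-owned values):
+ *
+ *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *	...device performs DMA against 'bus'...
+ *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
+ */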
+
+/*
+ * There are different chipsets to interface the Alpha CPUs to the world.
+ */
+
+#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
+#define _IO_CONCAT(a,b)	a ## _ ## b
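+
+/*
+ * Expansion example (illustrative): IO_CONCAT(cia, readb) expands its
+ * arguments first and then pastes them, yielding the identifier
+ * cia_readb.  The two-level definition exists precisely to force that
+ * argument expansion before the ## paste.
+ */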
+
+#ifdef CONFIG_ALPHA_GENERIC
+
+/* In a generic kernel, we always go through the machine vector.  */
+
+#define REMAP1(TYPE, NAME, QUAL)					\
+static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
+{									\
+	return alpha_mv.mv_##NAME(addr);				\
+}
+
+#define REMAP2(TYPE, NAME, QUAL)					\
+static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
+{									\
+	alpha_mv.mv_##NAME(b, addr);					\
+}
+
+REMAP1(unsigned int, ioread8, /**/)
+REMAP1(unsigned int, ioread16, /**/)
+REMAP1(unsigned int, ioread32, /**/)
+REMAP1(u8, readb, const volatile)
+REMAP1(u16, readw, const volatile)
+REMAP1(u32, readl, const volatile)
+REMAP1(u64, readq, const volatile)
+
+REMAP2(u8, iowrite8, /**/)
+REMAP2(u16, iowrite16, /**/)
+REMAP2(u32, iowrite32, /**/)
+REMAP2(u8, writeb, volatile)
+REMAP2(u16, writew, volatile)
+REMAP2(u32, writel, volatile)
+REMAP2(u64, writeq, volatile)
+
+#undef REMAP1
+#undef REMAP2
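+
+/*
+ * For reference (illustrative expansion): REMAP1(u8, readb, const
+ * volatile) above produces
+ *
+ *	static inline u8 generic_readb(const volatile void __iomem *addr)
+ *	{
+ *		return alpha_mv.mv_readb(addr);
+ *	}
+ *
+ * so a generic kernel routes every MMIO access through the machine
+ * vector at run time.
+ */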
+
+extern inline void __iomem *generic_ioportmap(unsigned long a)
+{
+	return alpha_mv.mv_ioportmap(a);
+}
+
+static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
+{
+	return alpha_mv.mv_ioremap(a, s);
+}
+
+static inline void generic_iounmap(volatile void __iomem *a)
+{
+	alpha_mv.mv_iounmap(a);
+}