/*
 * Alpha I/O access definitions (asm/io.h style header).
 * NOTE: this is an extracted copy; the original web-scrape banner and
 * line-number gutter have been removed.
 */
  1. #ifndef __ALPHA_IO_H
  2. #define __ALPHA_IO_H
  3. #ifdef __KERNEL__
  4. #include <linux/kernel.h>
  5. #include <linux/mm.h>
  6. #include <asm/compiler.h>
  7. #include <asm/pgtable.h>
  8. #include <asm/machvec.h>
  9. #include <asm/hwrpb.h>
  10. /* The generic header contains only prototypes. Including it ensures that
  11. the implementation we have here matches that interface. */
  12. #include <asm-generic/iomap.h>
  13. /* We don't use IO slowdowns on the Alpha, but.. */
  14. #define __SLOW_DOWN_IO do { } while (0)
  15. #define SLOW_DOWN_IO do { } while (0)
  16. /*
  17. * Virtual -> physical identity mapping starts at this offset
  18. */
  19. #ifdef USE_48_BIT_KSEG
  20. #define IDENT_ADDR 0xffff800000000000UL
  21. #else
  22. #define IDENT_ADDR 0xfffffc0000000000UL
  23. #endif
  24. /*
  25. * We try to avoid hae updates (thus the cache), but when we
  26. * do need to update the hae, we need to do it atomically, so
  27. * that any interrupts wouldn't get confused with the hae
  28. * register not being up-to-date with respect to the hardware
  29. * value.
  30. */
/*
 * Atomically update the HAE (Hardware Address Extension) register and
 * its software cache.  Interrupts are blocked for the duration (IPL
 * raised to IPL_MAX) so no interrupt handler can see the cache and the
 * hardware register out of sync.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);	/* block interrupts, save old IPL */

	barrier();				/* compiler fence around the critical section */

	alpha_mv.hae_cache = new_hae;		/* keep the software cache in sync */
	*alpha_mv.hae_register = new_hae;
	mb();					/* order the hardware register write */
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);				/* restore the previous IPL */
	barrier();
}
  43. extern inline void set_hae(unsigned long new_hae)
  44. {
  45. if (new_hae != alpha_mv.hae_cache)
  46. __set_hae(new_hae);
  47. }
  48. /*
  49. * Change virtual addresses to physical addresses and vv.
  50. */
  51. #ifdef USE_48_BIT_KSEG
  52. static inline unsigned long virt_to_phys(void *address)
  53. {
  54. return (unsigned long)address - IDENT_ADDR;
  55. }
  56. static inline void * phys_to_virt(unsigned long address)
  57. {
  58. return (void *) (address + IDENT_ADDR);
  59. }
  60. #else
  61. static inline unsigned long virt_to_phys(void *address)
  62. {
  63. unsigned long phys = (unsigned long)address;
  64. /* Sign-extend from bit 41. */
  65. phys <<= (64 - 41);
  66. phys = (long)phys >> (64 - 41);
  67. /* Crop to the physical address width of the processor. */
  68. phys &= (1ul << hwrpb->pa_bits) - 1;
  69. return phys;
  70. }
  71. static inline void * phys_to_virt(unsigned long address)
  72. {
  73. return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
  74. }
  75. #endif
  76. #define page_to_phys(page) page_to_pa(page)
  77. static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
  78. {
  79. return page_to_phys(page);
  80. }
  81. /* Maximum PIO space address supported? */
  82. #define IO_SPACE_LIMIT 0xffff
  83. /*
  84. * Change addresses as seen by the kernel (virtual) to addresses as
  85. * seen by a device (bus), and vice versa.
  86. *
  87. * Note that this only works for a limited range of kernel addresses,
  88. * and very well may not span all memory. Consider this interface
  89. * deprecated in favour of the DMA-mapping API.
  90. */
  91. extern unsigned long __direct_map_base;
  92. extern unsigned long __direct_map_size;
  93. static inline unsigned long __deprecated virt_to_bus(void *address)
  94. {
  95. unsigned long phys = virt_to_phys(address);
  96. unsigned long bus = phys + __direct_map_base;
  97. return phys <= __direct_map_size ? bus : 0;
  98. }
  99. #define isa_virt_to_bus virt_to_bus
  100. static inline void * __deprecated bus_to_virt(unsigned long address)
  101. {
  102. void *virt;
  103. /* This check is a sanity check but also ensures that bus address 0
  104. maps to virtual address 0 which is useful to detect null pointers
  105. (the NCR driver is much simpler if NULL pointers are preserved). */
  106. address -= __direct_map_base;
  107. virt = phys_to_virt(address);
  108. return (long)address <= 0 ? NULL : virt;
  109. }
  110. #define isa_bus_to_virt bus_to_virt
/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

/* Token-paste helper: joins A and B with an underscore.  The two-level
   form ensures macro arguments are expanded before pasting. */
#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

/* REMAP1: generate a generic_<NAME>() that reads through the machine
   vector's mv_<NAME> hook (read/ioread accessor family).  QUAL carries
   any qualifiers for the address parameter. */
#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
	return alpha_mv.mv_##NAME(addr); \
}

/* REMAP2: same, but for the write/iowrite family -- takes the value to
   write plus the target address. */
#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
	alpha_mv.mv_##NAME(b, addr); \
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

/* The generators are local to this header. */
#undef REMAP1
#undef REMAP2
/* Map a PIO port number through the machine vector's ioportmap hook.
   NOTE(review): this is `extern inline` while the neighbouring generic_*
   wrappers are `static inline`; on Alpha that usually pairs with an
   out-of-line definition emitted elsewhere via the __EXTERN_INLINE
   convention -- confirm before changing the linkage. */
extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}
/* Remap a bus-address range of size s through the machine vector's
   ioremap hook. */
static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}
  152. static inline void generic_iounmap(volatile void __iomem *a)