Commit 548927e0 authored by Sai Prakash Ranjan, committed by Arnd Bergmann

arm64: io: Use asm-generic high level MMIO accessors

Remove the custom arm64 MMIO accessors read{b,w,l,q} and their relaxed
versions in favour of the asm-generic definitions. Also define the single
set of I/O barriers (the ar/bw variants, __io_ar()/__io_bw()) that the
asm-generic accessors use, overriding the generic defaults with the
arm64-specific barriers.
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parent f2c50921
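For context, the asm-generic high-level accessors work by wrapping the architecture's __raw_* routines in the __io_br()/__io_ar() and __io_bw()/__io_aw() barrier hooks, which is why arm64 only needs to supply the ar/bw barriers plus the compatibility aliases in the diff below. The following is a minimal stand-alone C sketch of that pattern, not the kernel code itself: the hooks are plain compiler barriers here (not dmb/dma_wmb()), the endian conversion and tracing done by include/asm-generic/io.h are omitted, and fake_reg is just a stand-in for a device register.

#include <stdint.h>
#include <stdio.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

/*
 * Arch-overridable hooks, shaped like the arm64 definitions in the diff:
 * only the "after read" and "before write" hooks do anything, the other
 * two expand to nothing.  Real arm64 uses a load-ordering sequence and
 * dma_wmb() here; this sketch uses compiler barriers only.
 */
#define __io_br(v)
#define __io_ar(v)	barrier()	/* after a read */
#define __io_bw()	barrier()	/* before a write */
#define __io_aw(v)

static inline uint32_t __raw_readl(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

static inline void __raw_writel(uint32_t val, volatile void *addr)
{
	*(volatile uint32_t *)addr = val;
}

/* Shape of the generic readl()/writel(): raw access bracketed by the hooks. */
#define readl(c)	({ uint32_t __v; __io_br(__v); __v = __raw_readl(c); __io_ar(__v); __v; })
#define writel(v, c)	({ __io_bw(); __raw_writel((v), (c)); __io_aw(v); })

int main(void)
{
	static uint32_t fake_reg;	/* stand-in for an MMIO register */

	writel(0x12345678u, &fake_reg);
	printf("readl() -> 0x%08x\n", readl(&fake_reg));
	return 0;
}

With the hooks overridden as in this commit, the generic readl() ends up with the same "read, then barrier" behaviour the old arm64 readl() had, and writel() keeps its "barrier, then write" behaviour.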
arch/arm64/include/asm/io.h
@@ -91,7 +91,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 }
 
 /* IO barriers */
-#define __iormb(v)						\
+#define __io_ar(v)						\
 ({								\
 	unsigned long tmp;					\
 								\
@@ -108,39 +108,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 		     : "memory");				\
 })
 
-#define __io_par(v)		__iormb(v)
-#define __iowmb()		dma_wmb()
-#define __iomb()		dma_mb()
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses.
- */
-#define readb_relaxed(c)	({ u8 __r = __raw_readb(c); __r; })
-#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define __io_bw()		dma_wmb()
+#define __io_br(v)
+#define __io_aw(v)
 
-#define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
-#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
-#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
-#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access.
- */
-#define readb(c)		({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
-#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
-#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
-#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
-
-#define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
-#define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
-#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
-#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
+/* arm64-specific, don't use in portable drivers */
+#define __iormb(v)		__io_ar(v)
+#define __iowmb()		__io_bw()
+#define __iomb()		dma_mb()
 
 /*
  * I/O port access primitives.
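The comment removed above describes the ordering contract that the generic readX()/writeX() keep providing: a read is ordered before any following Normal memory access, and a write is ordered after any prior Normal memory access. A typical driver-side use of the write-side guarantee is sketched below; the struct layout, the 0x40 doorbell offset and ring_submit() are hypothetical, made up for illustration, and a real device would usually also need explicit endian conversion.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical descriptor layout, for illustration only. */
struct ring_desc {
	u64 dma_addr;
	u32 len;
	u32 flags;
};

static void ring_submit(void __iomem *regs, struct ring_desc *desc,
			dma_addr_t buf, u32 len)
{
	/* Normal-memory stores: publish the descriptor first... */
	desc->dma_addr = buf;
	desc->len = len;
	desc->flags = 1;	/* e.g. a "valid" bit */

	/*
	 * ...then ring the doorbell.  writel() issues its write barrier
	 * (__io_bw(), i.e. dma_wmb() on arm64) before the MMIO store, so
	 * the device cannot observe the doorbell before the descriptor data.
	 */
	writel(1, regs + 0x40);		/* hypothetical doorbell register */
}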