 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
-#include <linux/msm_rtb.h>
 #include <xen/xen.h>
 
 /*
@@ -62,24 +61,23 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
  * the bus. Rather than special-case the machine, just let the compiler
  * generate the access for CPUs prior to ARMv6.
  */
-#define __raw_readw_no_log(a)		(__chk_io_ptr(a), \
-					*(volatile unsigned short __force *)(a))
-#define __raw_writew_no_log(v, a)	((void)(__chk_io_ptr(a), \
-					*(volatile unsigned short __force *)\
-					(a) = (v)))
+#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
 #else
 /*
  * When running under a hypervisor, we want to avoid I/O accesses with
  * writeback addressing modes as these incur a significant performance
  * overhead (the address generation must be emulated in software).
  */
-static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %1, %0"
 		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
 }
 
-static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
 	u16 val;
 	asm volatile("ldrh %0, %1"
@@ -89,19 +87,22 @@ static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 }
 #endif
 
-static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %1, %0"
 		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
 }
 
-static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %1, %0"
 		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
 }
 
-static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
 {
 	register u64 v asm ("r2");
 
@@ -112,7 +113,8 @@ static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
 		     : "r" (v));
 }
 
-static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile("ldrb %0, %1"
@@ -121,7 +123,8 @@ static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 	return val;
 }
 
-static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile("ldr %0, %1"
@@ -130,7 +133,8 @@ static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 	return val;
 }
 
-static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
 {
 	register u64 val asm ("r2");
 
@@ -140,48 +144,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
 	return val;
 }
 
-/*
- * There may be cases when clients don't want to support or can't support the
- * logging. The appropriate functions can be used but clients should carefully
- * consider why they can't support the logging.
- */
-
-#define __raw_write_logged(v, a, _t)	({ \
-	int _ret; \
-	volatile void __iomem *_a = (a); \
-	void *_addr = (void __force *)(_a); \
-	_ret = uncached_logk(LOGK_WRITEL, _addr); \
-	ETB_WAYPOINT; \
-	__raw_write##_t##_no_log((v), _a); \
-	if (_ret) \
-		LOG_BARRIER; \
-	})
-
-
-#define __raw_writeb(v, a)	__raw_write_logged((v), (a), b)
-#define __raw_writew(v, a)	__raw_write_logged((v), (a), w)
-#define __raw_writel(v, a)	__raw_write_logged((v), (a), l)
-#define __raw_writeq(v, a)	__raw_write_logged((v), (a), q)
-
-#define __raw_read_logged(a, _l, _t)	({ \
-	unsigned _t __a; \
-	const volatile void __iomem *_a = (a); \
-	void *_addr = (void __force *)(_a); \
-	int _ret; \
-	_ret = uncached_logk(LOGK_READL, _addr); \
-	ETB_WAYPOINT; \
-	__a = __raw_read##_l##_no_log(_a); \
-	if (_ret) \
-		LOG_BARRIER; \
-	__a; \
-	})
-
-
-#define __raw_readb(a)		__raw_read_logged((a), b, char)
-#define __raw_readw(a)		__raw_read_logged((a), w, short)
-#define __raw_readl(a)		__raw_read_logged((a), l, int)
-#define __raw_readq(a)		__raw_read_logged((a), q, long long)
-
 /*
  * Architecture ioremap implementation.
  */
@@ -363,24 +325,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 					__raw_readl(c)); __r; })
 #define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
 					__raw_readq(c)); __r; })
-#define readb_relaxed_no_log(c)	({ u8 __r = __raw_readb_no_log(c); __r; })
-#define readl_relaxed_no_log(c)	({ u32 __r = le32_to_cpu((__force __le32) \
-					__raw_readl_no_log(c)); __r; })
-#define readq_relaxed_no_log(c)	({ u64 __r = le64_to_cpu((__force __le64) \
-					__raw_readq_no_log(c)); __r; })
 
 
 #define writeb_relaxed(v, c)	__raw_writeb(v, c)
 #define writew_relaxed(v, c)	__raw_writew((__force u16) cpu_to_le16(v), c)
 #define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
 #define writeq_relaxed(v, c)	__raw_writeq((__force u64) cpu_to_le64(v), c)
-#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
-#define writew_relaxed_no_log(v, c)	__raw_writew_no_log((__force u16) \
-					cpu_to_le16(v), c)
-#define writel_relaxed_no_log(v, c)	__raw_writel_no_log((__force u32) \
-					cpu_to_le32(v), c)
-#define writeq_relaxed_no_log(v, c)	__raw_writeq_no_log((__force u64) \
-					cpu_to_le64(v), c)
 
 #define readb(c)		({ u8 __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -401,24 +351,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writesw(p,d,l)		__raw_writesw(p,d,l)
 #define writesl(p,d,l)		__raw_writesl(p,d,l)
 
-#define readb_no_log(c) \
-	({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
-#define readw_no_log(c) \
-	({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
-#define readl_no_log(c) \
-	({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
-#define readq_no_log(c) \
-	({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
-
-#define writeb_no_log(v, c) \
-	({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
-#define writew_no_log(v, c) \
-	({ __iowmb(); writew_relaxed_no_log((v), (c)); })
-#define writel_no_log(v, c) \
-	({ __iowmb(); writel_relaxed_no_log((v), (c)); })
-#define writeq_no_log(v, c) \
-	({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
-
 #ifndef __ARMBE__
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 	size_t count)
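
Note (not part of the patch): with the *_no_log accessor family removed, callers go through the standard MMIO accessors defined above, which expand directly to the inline-asm __raw_* helpers. A minimal, hypothetical sketch of driver-side usage; my_dev_*, MY_STATUS_REG and MY_CTRL_REG are illustrative names, not taken from this file:

#include <linux/io.h>

#define MY_STATUS_REG	0x04	/* made-up register offset */
#define MY_CTRL_REG	0x08	/* made-up register offset */

static u32 my_dev_read_status(void __iomem *base)
{
	/* readl_relaxed(): __raw_readl() plus le32_to_cpu(), no barrier */
	return readl_relaxed(base + MY_STATUS_REG);
}

static void my_dev_start(void __iomem *base)
{
	/* writel() adds the __iowmb() barrier before the relaxed store */
	writel(1, base + MY_CTRL_REG);
}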