ARM: Rework and correct barrier definitions

While testing Linux kernel boots on Rockchip devices, Ziyuan Xu and
Sandy Patterson discovered that we had multiple, and in some cases
incomplete, isb definitions.  This was causing the Linux kernel to
fail to boot.

In order to solve this problem, as well as cover any corner cases we
may also have, a number of changes are made to consolidate things.
First, <asm/barriers.h> now becomes the single source of isb/dsb/dmb
definitions.  This however introduces another complexity: because SPL
for 32-bit Tegra must be built with -march=armv4, we need to borrow
the __LINUX_ARM_ARCH__ logic from the Linux kernel in a more complete
form.  Move this from arch/arm/lib/Makefile to arch/arm/Makefile and
add a comment about it.  Now that we always know what the target CPU
is capable of, we can always do the correct thing for the barrier.
The final part of this is that we need to be consistent everywhere:
call isb()/dsb()/dmb(), and do NOT use ISB/DSB/DMB in some places and
the function-style names in others.
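
As an illustration, a minimal sketch of what call sites look like once
everything funnels through <asm/barriers.h>; the helper below is
hypothetical and not part of this patch:

  #include <asm/barriers.h>	/* single source of isb()/dsb()/dmb() */

  /*
   * Hypothetical helper: the call site is identical on every ARM
   * target, while the macros expand to whatever the build's
   * __LINUX_ARM_ARCH__ value supports.
   */
  static inline void flush_and_resync_sketch(void)
  {
  	dsb();	/* "dsb sy" on ARMv7+, CP15 drain-write-buffer otherwise */
  	isb();	/* "isb sy" on ARMv7+, CP15 ISB on v6, compiler barrier below that */
  }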

Reviewed-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Stephen Warren <swarren@nvidia.com>
Acked-by: Ziyuan Xu <xzy.xu@rock-chips.com>
Acked-by: Sandy Patterson <apatterson@sightlogix.com>
Reported-by: Ziyuan Xu <xzy.xu@rock-chips.com>
Reported-by: Sandy Patterson <apatterson@sightlogix.com>
Signed-off-by: Tom Rini <trini@konsulko.com>
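
For context, the CP15ISB/CP15DSB/CP15DMB fallbacks used in the hunk
below are already defined earlier in <asm/barriers.h>, outside this
hunk; in the U-Boot tree they are the classic coprocessor encodings,
roughly:

  #define CP15ISB	asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0))
  #define CP15DSB	asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0))
  #define CP15DMB	asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0))

i.e. the pre-ARMv7 instruction synchronization, drain-write-buffer and
memory barrier operations.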
diff --git a/arch/arm/include/asm/barriers.h b/arch/arm/include/asm/barriers.h
index 37870f9..04784b7 100644
--- a/arch/arm/include/asm/barriers.h
+++ b/arch/arm/include/asm/barriers.h
@@ -30,15 +30,22 @@
 
 #endif /* !CONFIG_ARM64 */
 
-#if defined(__ARM_ARCH_7A__) || defined(CONFIG_ARM64)
+#if __LINUX_ARM_ARCH__ >= 7
 #define ISB	asm volatile ("isb sy" : : : "memory")
 #define DSB	asm volatile ("dsb sy" : : : "memory")
 #define DMB	asm volatile ("dmb sy" : : : "memory")
-#else
+#elif __LINUX_ARM_ARCH__ == 6
 #define ISB	CP15ISB
 #define DSB	CP15DSB
 #define DMB	CP15DMB
+#else
+#define ISB	asm volatile ("" : : : "memory")
+#define DSB	CP15DSB
+#define DMB	asm volatile ("" : : : "memory")
 #endif
 
+#define isb()	ISB
+#define dsb()	DSB
+#define dmb()	DMB
 #endif	/* __ASSEMBLY__ */
 #endif	/* __BARRIERS_H__ */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 6121f1d..5834f5b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/barriers.h>
 #if 0	/* XXX###XXX */
 #include <asm/arch/hardware.h>
 #endif	/* XXX###XXX */
@@ -136,8 +137,7 @@
  * TODO: The kernel offers some more advanced versions of barriers, it might
  * have some advantages to use them instead of the simple one here.
  */
-#define mb()		asm volatile("dsb sy" : : : "memory")
-#define dmb()		__asm__ __volatile__ ("" : : : "memory")
+#define mb()		dsb()
 #define __iormb()	dmb()
 #define __iowmb()	dmb()
 
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 2bdc0be..7b7b867 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -3,6 +3,7 @@
 
 #include <common.h>
 #include <linux/compiler.h>
+#include <asm/barriers.h>
 
 #ifdef CONFIG_ARM64
 
@@ -34,11 +35,6 @@
 	DCACHE_WRITEALLOC = 4 << 2,
 };
 
-#define isb()				\
-	({asm volatile(			\
-	"isb" : : : "memory");		\
-	})
-
 #define wfi()				\
 	({asm volatile(			\
 	"wfi" : : : "memory");		\
@@ -227,8 +223,6 @@
  */
 void save_boot_params_ret(void);
 
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
 #ifdef __ARM_ARCH_7A__
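
Of the two isb() definitions removed above, the ARM64 one is simply
superseded by <asm/barriers.h>, while the 32-bit one was the core of
the bug: it expanded to a pure compiler barrier, so sequences that
genuinely need an instruction synchronization barrier executed none.
A sketch of the kind of sequence that was affected; the function is
hypothetical and only illustrates the pattern:

  #include <asm/system.h>	/* pulls in <asm/barriers.h> */

  /*
   * Hypothetical example: enable the icache on ARMv7.  After writing
   * SCTLR the pipeline must be resynchronized; with the old empty
   * isb() nothing was emitted, with this series isb() is a real
   * "isb sy".
   */
  static void icache_enable_sketch(void)
  {
  	u32 reg;

  	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (reg));
  	reg |= 1 << 12;		/* SCTLR.I: instruction cache enable */
  	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (reg));
  	isb();
  }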