From: Zachary Amsden <zach@vmware.com>

This change encapsulates the MSR accessors and moves them into the
sub-architecture layer.  The goal is a clean, uniform interface that new
i386 sub-architectures may redefine, for example to route MSR access
through a hypervisor (a hypothetical sketch follows the diffstat below).

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 include/asm-i386/mach-default/mach_msr.h |   60 +++++++++++++++++++++++++++++++
 include/asm-i386/msr.h                   |   58 +----------------------------
 2 files changed, 62 insertions(+), 56 deletions(-)
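
A hypothetical illustration (not part of this patch): a paravirtualized
sub-architecture could supply its own mach_msr.h that keeps the same macro
interface but routes the access through the hypervisor.  The hypervisor
entry points and the mach-hypervisor directory below are invented for the
sketch; the _safe, rdtsc and rdpmc variants would be redefined the same way
and are omitted for brevity.

	/* include/asm-i386/mach-hypervisor/mach_msr.h (hypothetical) */
	#ifndef MACH_MSR_H
	#define MACH_MSR_H

	/* Assumed hypervisor entry points, defined elsewhere. */
	extern void hv_read_msr(unsigned int msr,
				unsigned int *lo, unsigned int *hi);
	extern void hv_write_msr(unsigned int msr,
				 unsigned int lo, unsigned int hi);

	/* Same interface as mach-default: writes val1/val2 directly. */
	#define rdmsr(msr,val1,val2) do {				\
		unsigned int l__, h__;					\
		hv_read_msr((msr), &l__, &h__);			\
		(val1) = l__;						\
		(val2) = h__;						\
	} while (0)

	#define wrmsr(msr,val1,val2) hv_write_msr((msr), (val1), (val2))

	#endif

Because rdmsrl() and wrmsrl() in msr.h expand these macros, existing
callers pick up the redefined behaviour without modification.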

diff -puN include/asm-i386/mach-default/mach_msr.h~i386-transparent-paravirtualization-sub-arch-move-msr-accessors-into-the-sub-arch-layer include/asm-i386/mach-default/mach_msr.h
--- devel/include/asm-i386/mach-default/mach_msr.h~i386-transparent-paravirtualization-sub-arch-move-msr-accessors-into-the-sub-arch-layer	2005-08-06 15:01:22.000000000 -0700
+++ devel-akpm/include/asm-i386/mach-default/mach_msr.h	2005-08-06 15:01:22.000000000 -0700
@@ -0,0 +1,60 @@
+#ifndef MACH_MSR_H
+#define MACH_MSR_H
+
+#define rdmsr(msr,val1,val2) \
+	__asm__ __volatile__("rdmsr" \
+			  : "=a" (val1), "=d" (val2) \
+			  : "c" (msr))
+
+#define wrmsr(msr,val1,val2) \
+	__asm__ __volatile__("wrmsr" \
+			  : /* no outputs */ \
+			  : "c" (msr), "a" (val1), "d" (val2))
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;						\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"						\
+		     "1:\n\t"								\
+		     ".section .fixup,\"ax\"\n\t"					\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"					\
+		     ".previous\n\t"							\
+ 		     ".section __ex_table,\"a\"\n"					\
+		     "   .align 4\n\t"							\
+		     "   .long 	2b,3b\n\t"						\
+		     ".previous"							\
+		     : "=a" (ret__)							\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+	ret__; })
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ int ret__;						\
+	asm volatile("2: rdmsr ; xorl %0,%0\n"						\
+		     "1:\n\t"								\
+		     ".section .fixup,\"ax\"\n\t"					\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"					\
+		     ".previous\n\t"							\
+ 		     ".section __ex_table,\"a\"\n"					\
+		     "   .align 4\n\t"							\
+		     "   .long 	2b,3b\n\t"						\
+		     ".previous"							\
+		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b))				\
+		     : "c" (msr), "i" (-EFAULT));\
+	ret__; })
+
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+     __asm__ __volatile__("rdpmc" \
+			  : "=a" (low), "=d" (high) \
+			  : "c" (counter))
+
+#endif
diff -puN include/asm-i386/msr.h~i386-transparent-paravirtualization-sub-arch-move-msr-accessors-into-the-sub-arch-layer include/asm-i386/msr.h
--- devel/include/asm-i386/msr.h~i386-transparent-paravirtualization-sub-arch-move-msr-accessors-into-the-sub-arch-layer	2005-08-06 15:01:22.000000000 -0700
+++ devel-akpm/include/asm-i386/msr.h	2005-08-06 15:01:22.000000000 -0700
@@ -1,22 +1,14 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#include <mach_msr.h>
+
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr,val1,val2) \
-	__asm__ __volatile__("rdmsr" \
-			  : "=a" (val1), "=d" (val2) \
-			  : "c" (msr))
-
-#define wrmsr(msr,val1,val2) \
-	__asm__ __volatile__("wrmsr" \
-			  : /* no outputs */ \
-			  : "c" (msr), "a" (val1), "d" (val2))
-
 #define rdmsrl(msr,val) do { \
 	unsigned long l__,h__; \
 	rdmsr (msr, l__, h__);  \
@@ -32,52 +24,6 @@ static inline void wrmsrl (unsigned long
 	wrmsr (msr, lo, hi);
 }
 
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;						\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"						\
-		     "1:\n\t"								\
-		     ".section .fixup,\"ax\"\n\t"					\
-		     "3:  movl %4,%0 ; jmp 1b\n\t"					\
-		     ".previous\n\t"							\
- 		     ".section __ex_table,\"a\"\n"					\
-		     "   .align 4\n\t"							\
-		     "   .long 	2b,3b\n\t"						\
-		     ".previous"							\
-		     : "=a" (ret__)							\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
-	ret__; })
-
-/* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ int ret__;						\
-	asm volatile("2: rdmsr ; xorl %0,%0\n"						\
-		     "1:\n\t"								\
-		     ".section .fixup,\"ax\"\n\t"					\
-		     "3:  movl %4,%0 ; jmp 1b\n\t"					\
-		     ".previous\n\t"							\
- 		     ".section __ex_table,\"a\"\n"					\
-		     "   .align 4\n\t"							\
-		     "   .long 	2b,3b\n\t"						\
-		     ".previous"							\
-		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b))				\
-		     : "c" (msr), "i" (-EFAULT));\
-	ret__; })
-
-#define rdtsc(low,high) \
-     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
-     __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-     __asm__ __volatile__("rdpmc" \
-			  : "=a" (low), "=d" (high) \
-			  : "c" (counter))
-
 /* symbolic names for some interesting MSRs */
 /* Intel defined MSRs. */
 #define MSR_IA32_P5_MC_ADDR		0
_
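
For reference, the _safe variants return 0 on success and -EFAULT when the
MSR access faults (caught via the __ex_table fixup above), so callers can
probe MSRs that may not be implemented on a given CPU.  A minimal,
hypothetical caller:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <asm/msr.h>

	/* Returns 0 and logs the value if the MSR exists, -ENODEV if
	 * reading it faulted. */
	static int __init probe_msr(unsigned int msr)
	{
		unsigned int lo, hi;

		if (rdmsr_safe(msr, &lo, &hi))
			return -ENODEV;

		printk(KERN_INFO "MSR %#x = %#x%08x\n", msr, hi, lo);
		return 0;
	}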