patch-2.1.43 linux/include/asm-alpha/byteorder.h


diff -u --recursive --new-file v2.1.42/linux/include/asm-alpha/byteorder.h linux/include/asm-alpha/byteorder.h
@@ -14,90 +14,65 @@
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
 
-#ifdef __KERNEL__
-
-/*
- * In-kernel byte order macros to handle stuff like
- * byte-order-dependent filesystems etc.
- */
-#define cpu_to_le32(x) (x)
-#define le32_to_cpu(x) (x)
-#define cpu_to_le16(x) (x)
-#define le16_to_cpu(x) (x)
-
-#define cpu_to_be32(x) htonl((x))
-#define be32_to_cpu(x) ntohl((x))
-#define cpu_to_be16(x) htons((x))
-#define be16_to_cpu(x) ntohs((x))
-
-#endif /* __KERNEL__ */
-
-extern unsigned long int	ntohl(unsigned long int);
+extern unsigned int		ntohl(unsigned int);
 extern unsigned short int	ntohs(unsigned short int);
-extern unsigned long int	htonl(unsigned long int);
+extern unsigned int		htonl(unsigned int);
 extern unsigned short int	htons(unsigned short int);
 
-extern unsigned long int	__ntohl(unsigned long int);
+extern unsigned int		__ntohl(unsigned int);
 extern unsigned short int	__ntohs(unsigned short int);
 
 #ifdef __GNUC__
 
-extern unsigned long int	__constant_ntohl(unsigned long int);
+extern unsigned int		__constant_ntohl(unsigned int);
 extern unsigned short int	__constant_ntohs(unsigned short int);
 
-/*
- * The constant and non-constant versions here are the same.
- * Maybe I'll come up with an alpha-optimized routine for the
- * non-constant ones (the constant ones don't need it: gcc
- * will optimize it to the correct constant)
- */
-
-extern __inline__ unsigned long int
-__ntohl(unsigned long int x)
+extern __inline__ unsigned int
+__ntohl(unsigned int x)
 {
-	unsigned long int res, t1, t2;
+	unsigned int t1, t2, t3;
+
+	/* Break the final or's out of the block so that gcc can
+	   schedule them at will.  Further, use add not or so that
+	   we elide the sign extend gcc will put in because the
+	   return type is not a long.  */
 
 	__asm__(
-	"# bswap input: %0 (aabbccdd)\n\t"
-	"# output: %0, used %1 %2\n\t"
-	"extlh	%0,5,%1		# %1 = dd000000\n\t"
-	"zap	%0,0xfd,%2	# %2 = 0000cc00\n\t"
-	"sll	%2,5,%2		# %2 = 00198000\n\t"
-	"s8addq	%2,%1,%1	# %1 = ddcc0000\n\t"
-	"zap	%0,0xfb,%2	# %2 = 00bb0000\n\t"
-	"srl	%2,8,%2		# %2 = 0000bb00\n\t"
-	"extbl	%0,3,%0		# %0 = 000000aa\n\t"
-	"or	%1,%0,%0	# %0 = ddcc00aa\n\t"
-	"or	%2,%0,%0	# %0 = ddccbbaa\n"
-	: "r="(res), "r="(t1), "r="(t2)
-	: "0" (x & 0xffffffffUL));
-	return res;
+	"insbl	%3,3,%1		# %1 = dd000000\n\t"
+	"zapnot	%3,2,%2		# %2 = 0000cc00\n\t"
+	"sll	%2,8,%2		# %2 = 00cc0000\n\t"
+	"or	%2,%1,%1	# %1 = ddcc0000\n\t"
+	"zapnot	%3,4,%2		# %2 = 00bb0000\n\t"
+	"extbl	%3,3,%0		# %0 = 000000aa\n\t"
+	"srl	%2,8,%2		# %2 = 0000bb00"
+	: "=r"(t3), "=&r"(t1), "=&r"(t2)
+	: "r"(x));
+
+	return t3 + t2 + t1;
 }
 
 #define __constant_ntohl(x) \
-   ((unsigned long int)((((x) & 0x000000ffUL) << 24) | \
-			(((x) & 0x0000ff00UL) <<  8) | \
-			(((x) & 0x00ff0000UL) >>  8) | \
-			(((x) & 0xff000000UL) >> 24)))
+   ((unsigned int)((((x) & 0x000000ff) << 24) | \
+		   (((x) & 0x0000ff00) <<  8) | \
+		   (((x) & 0x00ff0000) >>  8) | \
+		   (((x) & 0xff000000) >> 24)))
 
 extern __inline__ unsigned short int
 __ntohs(unsigned short int x)
 {
-	unsigned long int res, t1;
+	unsigned short int t1, t2;
 	
 	__asm__(
-	"# v0 is result; swap in-place.\n\t"
-	"bis	%2,%2,%0	# v0 = aabb\n\t"
-	"extwh	%0,7,%1		# t1 = bb00\n\t"
-	"extbl	%0,1,%0		# v0 = 00aa\n\t"
-	"bis	%0,%1,%0	# v0 = bbaa\n"
-	: "r="(res), "r="(t1) : "r"(x));
-	return res;
+	"insbl	%2,1,%1		# %1 = bb00\n\t"
+	"extbl	%2,1,%0		# %0 = 00aa"
+	: "=r"(t1), "=&r"(t2) : "r"(x));
+
+	return t1 | t2;
 }
 
 #define __constant_ntohs(x) \
-((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
-		      (((unsigned short int)(x) & 0xff00) >> 8)))
+((unsigned short int)((((x) & 0x00ff) << 8) | \
+		      (((x) & 0xff00) >> 8)))
 
 #define __htonl(x) __ntohl(x)
 #define __htons(x) __ntohs(x)
@@ -124,5 +99,70 @@
 #endif /* __OPTIMIZE__ */
 
 #endif /* __GNUC__ */
+
+#ifdef __KERNEL__
+
+/*
+ * In-kernel byte order macros to handle stuff like
+ * byte-order-dependent filesystems etc.
+ */
+#define cpu_to_le32(x) (x)
+#define cpu_to_le16(x) (x)
+
+#define cpu_to_be32(x) htonl((x))
+#define cpu_to_be16(x) htons((x))
+
+/* The same, but returns the converted value from the location pointed to by addr. */
+extern __inline__ __u16 cpu_to_le16p(__u16 *addr)
+{
+	return cpu_to_le16(*addr);
+}
+
+extern __inline__ __u32 cpu_to_le32p(__u32 *addr)
+{
+	return cpu_to_le32(*addr);
+}
+
+extern __inline__ __u16 cpu_to_be16p(__u16 *addr)
+{
+	return cpu_to_be16(*addr);
+}
+
+extern __inline__ __u32 cpu_to_be32p(__u32 *addr)
+{
+	return cpu_to_be32(*addr);
+}
+
+/* The same, but do the conversion in situ, ie. put the value back to addr. */
+#define cpu_to_le16s(x) do { } while (0)
+#define cpu_to_le32s(x) do { } while (0)
+
+extern __inline__ void cpu_to_be16s(__u16 *addr)
+{
+	*addr = cpu_to_be16(*addr);
+}
+
+extern __inline__ void cpu_to_be32s(__u32 *addr)
+{
+	*addr = cpu_to_be32(*addr);
+}
+
+/* Convert from specified byte order, to CPU byte order. */
+#define le16_to_cpu(x)  cpu_to_le16(x)
+#define le32_to_cpu(x)  cpu_to_le32(x)
+#define be16_to_cpu(x)  cpu_to_be16(x)
+#define be32_to_cpu(x)  cpu_to_be32(x)
+
+#define le16_to_cpup(x) cpu_to_le16p(x)
+#define le32_to_cpup(x) cpu_to_le32p(x)
+#define be16_to_cpup(x) cpu_to_be16p(x)
+#define be32_to_cpup(x) cpu_to_be32p(x)
+
+#define le16_to_cpus(x) cpu_to_le16s(x)
+#define le32_to_cpus(x) cpu_to_le32s(x)
+#define be16_to_cpus(x) cpu_to_be16s(x)
+#define be32_to_cpus(x) cpu_to_be32s(x)
+
+#endif /* __KERNEL__ */
 
 #endif /* _ALPHA_BYTEORDER_H */
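
For orientation, the net effect of the reorganised __KERNEL__ block is that the little-endian helpers are identity operations on Alpha (a little-endian CPU), while the big-endian helpers funnel through the htonl()/htons() routines rewritten earlier in the patch. Below is a minimal usage sketch of the new pointer ("p") and in-place ("s") variants; the structure and function names (foo_disk, foo_read_header) are invented purely for illustration and are not part of the patch.

	#include <asm/types.h>		/* __u16, __u32 */
	#include <asm/byteorder.h>	/* the header patched above */

	/* Hypothetical on-disk layout, invented for this example only. */
	struct foo_disk {
		__u32 magic_le;		/* stored little-endian on disk */
		__u16 port_be;		/* stored big-endian (network order) */
	};

	static void foo_read_header(struct foo_disk *d, __u32 *magic, __u16 *port)
	{
		/* On Alpha le32_to_cpu() expands to the value itself;
		   be16_to_cpup() byte-swaps via htons() (the inline
		   __ntohs() when optimising). */
		*magic = le32_to_cpu(d->magic_le);
		*port  = be16_to_cpup(&d->port_be);

		/* In-place form: rewrites the big-endian field behind the
		   pointer so later code can read it in CPU order. */
		be16_to_cpus(&d->port_be);
	}

The value-returning and in-place calls would not normally both be applied to the same field; they are shown together only to exercise each of the new macro families once.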
