patch-2.1.71 linux/include/asm-i386/byteorder.h

diff -u --recursive --new-file v2.1.70/linux/include/asm-i386/byteorder.h linux/include/asm-i386/byteorder.h
@@ -1,36 +1,16 @@
 #ifndef _I386_BYTEORDER_H
 #define _I386_BYTEORDER_H
 
-#undef ntohl
-#undef ntohs
-#undef htonl
-#undef htons
+#include <asm/types.h>
 
-#ifndef __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN 1234
-#endif
-
-#ifndef __LITTLE_ENDIAN_BITFIELD
-#define __LITTLE_ENDIAN_BITFIELD
-#endif
+#ifdef __GNUC__
 
 /* For avoiding bswap on i386 */
 #ifdef __KERNEL__
 #include <linux/config.h>
 #endif
 
-extern unsigned long int	ntohl(unsigned long int);
-extern unsigned short int	ntohs(unsigned short int);
-extern unsigned long int	htonl(unsigned long int);
-extern unsigned short int	htons(unsigned short int);
-
-extern __inline__ unsigned long int	__ntohl(unsigned long int);
-extern __inline__ unsigned short int	__ntohs(unsigned short int);
-extern __inline__ unsigned long int	__constant_ntohl(unsigned long int);
-extern __inline__ unsigned short int	__constant_ntohs(unsigned short int);
-
-extern __inline__ unsigned long int
-__ntohl(unsigned long int x)
+static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
 {
 #if defined(__KERNEL__) && !defined(CONFIG_M386)
 	__asm__("bswap %0" : "=r" (x) : "0" (x));
@@ -40,115 +20,28 @@
 		"xchgb %b0,%h0"		/* swap higher bytes	*/
 		:"=q" (x)
 		: "0" (x));
-#endif	
-	return x;
-}
-
-#define __constant_ntohl(x) \
-	((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \
-			     (((unsigned long int)(x) & 0x0000ff00U) <<  8) | \
-			     (((unsigned long int)(x) & 0x00ff0000U) >>  8) | \
-			     (((unsigned long int)(x) & 0xff000000U) >> 24)))
-
-extern __inline__ unsigned short int
-__ntohs(unsigned short int x)
-{
-	__asm__("xchgb %b0,%h0"		/* swap bytes		*/
-		: "=q" (x)
-		:  "0" (x));
-	return x;
-}
-
-#define __constant_ntohs(x) \
-	((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
-			      (((unsigned short int)(x) & 0xff00) >> 8))) \
-
-#define __htonl(x) __ntohl(x)
-#define __htons(x) __ntohs(x)
-#define __constant_htonl(x) __constant_ntohl(x)
-#define __constant_htons(x) __constant_ntohs(x)
-
-#ifdef  __OPTIMIZE__
-#  define ntohl(x) \
-(__builtin_constant_p((long)(x)) ? \
- __constant_ntohl((x)) : \
- __ntohl((x)))
-#  define ntohs(x) \
-(__builtin_constant_p((short)(x)) ? \
- __constant_ntohs((x)) : \
- __ntohs((x)))
-#  define htonl(x) \
-(__builtin_constant_p((long)(x)) ? \
- __constant_htonl((x)) : \
- __htonl((x)))
-#  define htons(x) \
-(__builtin_constant_p((short)(x)) ? \
- __constant_htons((x)) : \
- __htons((x)))
 #endif
-
-#ifdef __KERNEL__
-/*
- * In-kernel byte order macros to handle stuff like
- * byte-order-dependent filesystems etc.
- */
-#define cpu_to_le32(x) (x)
-#define cpu_to_le16(x) (x)
-
-#define cpu_to_be32(x) htonl((x))
-#define cpu_to_be16(x) htons((x))
-
-/* The same, but returns converted value from the location pointer by addr. */
-extern __inline__ __u16 cpu_to_le16p(__u16 *addr)
-{
-	return cpu_to_le16(*addr);
-}
-
-extern __inline__ __u32 cpu_to_le32p(__u32 *addr)
-{
-	return cpu_to_le32(*addr);
-}
-
-extern __inline__ __u16 cpu_to_be16p(__u16 *addr)
-{
-	return cpu_to_be16(*addr);
+	return x;
 }
 
-extern __inline__ __u32 cpu_to_be32p(__u32 *addr)
+static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
 {
-	return cpu_to_be32(*addr);
+	__asm__("xchgb %b0,%h0"		/* swap bytes		*/
+		: "=q" (x)
+		:  "0" (x));
+	return x;
 }
 
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define cpu_to_le16s(x) do { } while (0)
-#define cpu_to_le32s(x) do { } while (0)
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
 
-extern __inline__ void cpu_to_be16s(__u16 *addr)
-{
-	*addr = cpu_to_be16(*addr);
-}
-
-extern __inline__ void cpu_to_be32s(__u32 *addr)
-{
-	*addr = cpu_to_be32(*addr);
-}
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
 
-/* Convert from specified byte order, to CPU byte order. */
-#define le16_to_cpu(x)  cpu_to_le16(x)
-#define le32_to_cpu(x)  cpu_to_le32(x)
-#define be16_to_cpu(x)  cpu_to_be16(x)
-#define be32_to_cpu(x)  cpu_to_be32(x)
-
-#define le16_to_cpup(x) cpu_to_le16p(x)
-#define le32_to_cpup(x) cpu_to_le32p(x)
-#define be16_to_cpup(x) cpu_to_be16p(x)
-#define be32_to_cpup(x) cpu_to_be32p(x)
-
-#define le16_to_cpus(x) cpu_to_le16s(x)
-#define le32_to_cpus(x) cpu_to_le32s(x)
-#define be16_to_cpus(x) cpu_to_be16s(x)
-#define be32_to_cpus(x) cpu_to_be32s(x)
+#endif /* __GNUC__ */
 
-#endif /* __KERNEL__ */
+#include <linux/byteorder_little_endian.h>
 
-#endif
+#endif /* _I386_BYTEORDER_H */
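
The heart of the patch is the pair of ___arch__swab* primitives: instead of
each port defining ntohl()/htonl() and the in-kernel cpu_to_* family itself,
asm-i386 now supplies only raw byte-swap routines and lets the generic
little-endian header build everything else on top. As a minimal user-space
sketch (not part of the patch; GNU C on x86 is assumed, and the names
swab32_bswap, swab32_rotate and SWAB32_CONST are invented here), the three
32-bit swap strategies visible in the diff look like this: the
single-instruction bswap used when CONFIG_M386 is unset (bswap first appeared
on the 486, hence the guard), the exchange/rotate fallback for a plain 386,
and the shift-and-mask form the removed __constant_ntohl macro used so that
constants fold at compile time.

#include <stdio.h>
#include <stdint.h>

/* i486+ path: one instruction (the 386 has no "bswap") */
static inline uint32_t swab32_bswap(uint32_t x)
{
	__asm__("bswap %0" : "=r" (x) : "0" (x));
	return x;
}

/* plain-386 fallback from the diff: exchange, rotate, exchange.
 * "Q" = a/b/c/d register (needed for the %h0 high-byte access);
 * the i386-only header gets away with the looser "q" constraint. */
static inline uint32_t swab32_rotate(uint32_t x)
{
	__asm__("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
		"rorl $16,%0\n\t"	/* swap words		*/
		"xchgb %b0,%h0"		/* swap higher bytes	*/
		: "=Q" (x)
		: "0" (x));
	return x;
}

/* the shift-and-mask form the removed __constant_ntohl macro used */
#define SWAB32_CONST(x) \
	((uint32_t)((((uint32_t)(x) & 0x000000ffU) << 24) | \
		    (((uint32_t)(x) & 0x0000ff00U) <<  8) | \
		    (((uint32_t)(x) & 0x00ff0000U) >>  8) | \
		    (((uint32_t)(x) & 0xff000000U) >> 24)))

int main(void)
{
	uint32_t v = 0x12345678;

	/* all three print 78563412 */
	printf("%08x %08x %08x\n",
	       swab32_bswap(v), swab32_rotate(v), SWAB32_CONST(v));
	return 0;
}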

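The new __BYTEORDER_HAS_U64__ / __SWAB_64_THRU_32__ pair tells the generic
header that 64-bit conversions should be provided, but that the architecture
has no native 64-bit swap, so the generic code must synthesize one from the
32-bit primitive. A sketch of that synthesis, assuming only that it composes
two 32-bit swaps (the helper names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) | ((x & 0xff000000U) >> 24);
}

/* swap each 32-bit half, then exchange the halves */
static uint64_t swab64_thru_32(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	uint32_t lo = (uint32_t)x;

	return ((uint64_t)swab32(lo) << 32) | (uint64_t)swab32(hi);
}

int main(void)
{
	/* prints 0807060504030201 */
	printf("%016llx\n",
	       (unsigned long long)swab64_thru_32(0x0102030405060708ULL));
	return 0;
}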
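
Finally, the bulk of the deleted material (the cpu_to_le*/cpu_to_be* family
with its *_to_cpu, pointer and in-situ variants) is exactly what
linux/byteorder_little_endian.h now generates from the swab primitives. The
removed definitions make the underlying point plain: on a little-endian CPU,
conversion to little-endian is the identity, while conversion to big-endian
is a byte swap, the same operation htonl() performs. A user-space rendering
of the removed macros (illustrative only, reusing swab32 from above):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) | ((x & 0xff000000U) >> 24);
}

/* little-endian host, as in the removed i386 macros */
#define cpu_to_le32(x)	(x)		/* identity			*/
#define cpu_to_be32(x)	swab32(x)	/* swap, like htonl()		*/
#define le32_to_cpu(x)	cpu_to_le32(x)	/* same both ways: identity	*/
#define be32_to_cpu(x)	cpu_to_be32(x)	/* same both ways: swap is its
					   own inverse			*/

int main(void)
{
	uint32_t v = 0x12345678;

	/* prints "le: 12345678  be: 78563412" */
	printf("le: %08x  be: %08x\n", cpu_to_le32(v), cpu_to_be32(v));
	return 0;
}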