patch-2.1.16 linux/include/asm-sparc/bitops.h


diff -u --recursive --new-file v2.1.15/linux/include/asm-sparc/bitops.h linux/include/asm-sparc/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.36 1996/09/29 22:57:21 davem Exp $
+/* $Id: bitops.h,v 1.39 1996/12/10 06:06:35 davem Exp $
  * bitops.h: Bit string operations on the Sparc.
  *
  * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -104,19 +104,30 @@
 	mask = 1 << (nr & 31);
 	__asm__ __volatile__("
 	rd	%%psr, %%g3
+	nop
+	nop
+	nop
 	andcc	%%g3, %3, %%g0
-	be,a	1f
-	 wr	%%g3, %3, %%psr
-1:	ld	[%0], %%g4
+	bne	1f
+	 nop
+	wr	%%g3, %3, %%psr
+	nop
+	nop
+	nop
+1:
+	ld	[%0], %%g4
 	or	%%g4, %2, %%g2
 	andcc	%%g3, %3, %%g0
 	st	%%g2, [%0]
-	be,a	1f
-	 wr	%%g3, 0x0, %%psr
-1:	nop
+	bne	1f
+	 nop
+	wr	%%g3, 0x0, %%psr
+	nop
+	nop
+	nop
+1:
 	and	%%g4, %2, %0
-	"
-	: "=&r" (ADDR)
+"	: "=&r" (ADDR)
 	: "0" (ADDR), "r" (mask), "i" (PSR_PIL)
 	: "g2", "g3", "g4");
 
@@ -132,19 +143,30 @@
 	mask = 1 << (nr & 31);
 	__asm__ __volatile__("
 	rd	%%psr, %%g3
+	nop
+	nop
+	nop
 	andcc	%%g3, %3, %%g0
-	be,a	1f
-	 wr	%%g3, %3, %%psr
-1:	ld	[%0], %%g4
+	bne	1f
+	 nop
+	wr	%%g3, %3, %%psr
+	nop
+	nop
+	nop
+1:
+	ld	[%0], %%g4
 	andn	%%g4, %2, %%g2
 	andcc	%%g3, %3, %%g0
 	st	%%g2, [%0]
-	be,a	1f
+	bne	1f
+	 nop
 	wr	%%g3, 0x0, %%psr
-1:	nop
+	nop
+	nop
+	nop
+1:
 	and	%%g4, %2, %0
-	"
-	: "=&r" (ADDR)
+"	: "=&r" (ADDR)
 	: "0" (ADDR), "r" (mask), "i" (PSR_PIL)
 	: "g2", "g3", "g4");
 
@@ -160,19 +182,30 @@
 	mask = 1 << (nr & 31);
 	__asm__ __volatile__("
 	rd	%%psr, %%g3
+	nop
+	nop
+	nop
 	andcc	%%g3, %3, %%g0
-	be,a	1f
-	 wr	%%g3, %3, %%psr
-1:	ld	[%0], %%g4
+	bne	1f
+	 nop
+	wr	%%g3, %3, %%psr
+	nop
+	nop
+	nop
+1:
+	ld	[%0], %%g4
 	xor	%%g4, %2, %%g2
 	andcc	%%g3, %3, %%g0
 	st	%%g2, [%0]
-	be,a	1f
-	 wr	%%g3, 0x0, %%psr
-1:	nop
+	bne	1f
+	 nop
+	wr	%%g3, 0x0, %%psr
+	nop
+	nop
+	nop
+1:
 	and	%%g4, %2, %0
-	"
-	: "=&r" (ADDR)
+"	: "=&r" (ADDR)
 	: "0" (ADDR), "r" (mask), "i" (PSR_PIL)
 	: "g2", "g3", "g4");
 
@@ -249,7 +282,7 @@
 
 #ifndef __KERNEL__
 
-extern __inline__ int __ext2_set_bit(int nr, void *addr)
+extern __inline__ int set_le_bit(int nr, void *addr)
 {
 	int		mask;
 	unsigned char	*ADDR = (unsigned char *) addr;
@@ -269,7 +302,7 @@
 	return (int) ADDR;
 }
 
-extern __inline__ int __ext2_clear_bit(int nr, void *addr)
+extern __inline__ int clear_le_bit(int nr, void *addr)
 {
 	int		mask;
 	unsigned char	*ADDR = (unsigned char *) addr;
@@ -293,7 +326,7 @@
 
 /* Now for the ext2 filesystem bit operations and helper routines. */
 
-extern __inline__ int __ext2_set_bit(int nr,void * addr)
+extern __inline__ int set_le_bit(int nr,void * addr)
 {
 	int		mask;
 	unsigned char	*ADDR = (unsigned char *) addr;
@@ -302,26 +335,37 @@
 	mask = 1 << (nr & 0x07);
 	__asm__ __volatile__("
 	rd	%%psr, %%g3
+	nop
+	nop
+	nop
 	andcc	%%g3, %3, %%g0
-	be,a	1f
-	 wr	%%g3, %3, %%psr
-1:	ldub	[%0], %%g4
+	bne	1f
+	 nop
+	wr	%%g3, %3, %%psr
+	nop
+	nop
+	nop
+1:
+	ldub	[%0], %%g4
 	or	%%g4, %2, %%g2
 	andcc	%%g3, %3, %%g0
 	stb	%%g2, [%0]
-	be,a	1f
-	 wr	%%g3, 0x0, %%psr
-1:	nop
+	bne	1f
+	 nop
+	wr	%%g3, 0x0, %%psr
+	nop
+	nop
+	nop
+1:
 	and	%%g4, %2, %0
-	"
-	: "=&r" (ADDR)
+"	: "=&r" (ADDR)
 	: "0" (ADDR), "r" (mask), "i" (PSR_PIL)
 	: "g2", "g3", "g4");
 
 	return (int) ADDR;
 }
 
-extern __inline__ int __ext2_clear_bit(int nr, void * addr)
+extern __inline__ int clear_le_bit(int nr, void * addr)
 {
 	int		mask;
 	unsigned char	*ADDR = (unsigned char *) addr;
@@ -330,19 +374,30 @@
 	mask = 1 << (nr & 0x07);
 	__asm__ __volatile__("
 	rd	%%psr, %%g3
+	nop
+	nop
+	nop
 	andcc	%%g3, %3, %%g0
-	be,a	1f
-	 wr	%%g3, %3, %%psr
-1:	ldub	[%0], %%g4
+	bne	1f
+	 nop
+	wr	%%g3, %3, %%psr
+	nop
+	nop
+	nop
+1:
+	ldub	[%0], %%g4
 	andn	%%g4, %2, %%g2
 	andcc	%%g3, %3, %%g0
 	stb	%%g2, [%0]
-	be,a	1f
+	bne	1f
+	 nop
 	wr	%%g3, 0x0, %%psr
-1:	nop
+	nop
+	nop
+	nop
+1:
 	and	%%g4, %2, %0
-	"
-	: "=&r" (ADDR)
+"	: "=&r" (ADDR)
 	: "0" (ADDR), "r" (mask), "i" (PSR_PIL)
 	: "g2", "g3", "g4");
 
@@ -351,7 +406,7 @@
 
 #endif /* __KERNEL__ */
 
-extern __inline__ int __ext2_test_bit(int nr, __const__ void * addr)
+extern __inline__ int test_le_bit(int nr, __const__ void * addr)
 {
 	int			mask;
 	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
@@ -361,21 +416,24 @@
 	return ((mask & *ADDR) != 0);
 }
 
-extern __inline__ unsigned short __swab16(unsigned short value)
-{
-        return ((value >> 8) | (value << 8));
-}     
+#ifdef __KERNEL__
+
+#define ext2_set_bit   set_le_bit
+#define ext2_clear_bit clear_le_bit
+#define ext2_test_bit  test_le_bit
+
+#endif /* __KERNEL__ */
+
+#define find_first_zero_le_bit(addr, size) \
+        find_next_zero_le_bit((addr), (size), 0)
 
 extern __inline__ unsigned long __swab32(unsigned long value)
 {
-        return ((value >> 24) | ((value >> 8) & 0xff00) |
-               ((value << 8) & 0xff0000) | (value << 24));
+	return((value>>24) | ((value>>8)&0xff00) |
+	       ((value<<8)&0xff0000) | (value<<24));
 }     
 
-#define __ext2_find_first_zero_bit(addr, size) \
-        __ext2_find_next_zero_bit((addr), (size), 0)
-
-extern __inline__ unsigned long __ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
@@ -411,5 +469,11 @@
 	return result + ffz(__swab32(tmp));
 }
 
-#endif /* defined(_SPARC_BITOPS_H) */
+#ifdef __KERNEL__
+
+#define ext2_find_first_zero_bit     find_first_zero_le_bit
+#define ext2_find_next_zero_bit      find_next_zero_le_bit
 
+#endif /* __KERNEL__ */
+
+#endif /* defined(_SPARC_BITOPS_H) */
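
For context, the little-endian bit helpers introduced above scan ext2-style bitmaps on the big-endian SPARC by byte-swapping each 32-bit word (__swab32) and then taking the first zero bit with ffz. The following is a minimal, self-contained user-space sketch of that technique, not the kernel code itself; the names swab32, ffz32 and first_zero_le_bit are illustrative stand-ins for the header's __swab32, ffz and find_first_zero_le_bit.

	#include <stdio.h>
	#include <string.h>

	/* Local stand-in for __swab32: reverse the byte order of a 32-bit word. */
	static unsigned long swab32(unsigned long value)
	{
		return ((value >> 24) | ((value >> 8) & 0xff00) |
			((value << 8) & 0xff0000) | (value << 24)) & 0xffffffffUL;
	}

	/* Local stand-in for ffz: index of the first zero bit, counting from the LSB. */
	static unsigned long ffz32(unsigned long word)
	{
		unsigned long bit;

		for (bit = 0; bit < 32; bit++)
			if (!((word >> bit) & 1))
				return bit;
		return 32;
	}

	/* Find the first zero bit in a little-endian bitmap of 'size' bits. */
	static unsigned long first_zero_le_bit(const unsigned char *bitmap,
					       unsigned long size)
	{
		unsigned long word, offset;

		for (offset = 0; offset < size; offset += 32) {
			/* Compose the word as a big-endian load would, then swap it
			 * so that little-endian bit n maps to bit n of the word. */
			word = (unsigned long)bitmap[offset / 8]     << 24 |
			       (unsigned long)bitmap[offset / 8 + 1] << 16 |
			       (unsigned long)bitmap[offset / 8 + 2] <<  8 |
			       (unsigned long)bitmap[offset / 8 + 3];
			word = swab32(word);
			if (word != 0xffffffffUL)
				return offset + ffz32(word);
		}
		return size;	/* no zero bit found */
	}

	int main(void)
	{
		unsigned char bitmap[8];

		memset(bitmap, 0xff, sizeof(bitmap));
		bitmap[5] = 0xfd;	/* clear little-endian bit 41 (byte 5, bit 1) */

		printf("first zero bit: %lu\n", first_zero_le_bit(bitmap, 64));
		return 0;
	}

Running the sketch prints 41: the zero bit cleared in byte 5 is found after the byte swap exposes it at bit 9 of the second 32-bit word (32 + 9 = 41), which mirrors how the patched find_next_zero_le_bit locates free ext2 bitmap entries on SPARC.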
