patch-2.1.79 linux/include/asm-ppc/smp_lock.h


diff -u --recursive --new-file v2.1.78/linux/include/asm-ppc/smp_lock.h linux/include/asm-ppc/smp_lock.h
@@ -1,7 +1,6 @@
 #ifndef __PPC_SMPLOCK_H
 #define __PPC_SMPLOCK_H
 
-#include <linux/kernel.h> /* for panic */
 #ifndef __SMP__
 
 #define lock_kernel()		do { } while (0)
@@ -9,7 +8,7 @@
 #define release_kernel_lock(task, cpu, depth)	((depth) = 1)
 #define reacquire_kernel_lock(task, cpu, depth)	do { } while(0)
 
-#else
+#else /* __SMP__ */
 
 /* Release global kernel lock and global interrupt lock */
 #define release_kernel_lock(task, cpu, depth)		\
@@ -19,39 +18,30 @@
 		(task)->lock_depth = 0;			\
 		klock_info.akp = NO_PROC_ID;		\
 		klock_info.kernel_flag = 0;		\
-	}						\
+	} 						\
 	release_irqlock(cpu);				\
 	__sti();					\
 } while(0)
 
-/* Re-acquire the kernel lock */
-#define reacquire_kernel_lock(task, cpu, depth) \
-do { if (depth) \
-	{ __cli(); \
-	  __asm__ __volatile__( \
-	  "blr __lock_kernel\n\t" \
-	  "stw %2,%0\n\t" \
-	  : "=m" (task->lock_depth) \
-	  : "d" (cpu), "c" (depth)); \
-	  __sti(); \
-       } \
-} while (0)
+extern void reacquire_kernel_lock(struct task_struct *, int,int);
 
 /* The following acquire and release the master kernel global lock,
  * the idea is that the usage of this mechanism becomes less and less
  * as time goes on, to the point where they are no longer needed at all
  * and can thus disappear.
  */
+extern void __lock_kernel(struct task_struct *);
+extern void __unlock_kernel(struct task_struct *);
 
 extern __inline__ void lock_kernel(void)
 {
-	panic("lock_kernel()\n");
+	__lock_kernel(current);
 }
 
 /* Release kernel global lock. */
 extern __inline__ void unlock_kernel(void)
 {
-	panic("unlock_kernel()\n");
+	__unlock_kernel(current);
 }
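
Editorial note: the patch above replaces the old inline-asm/panic stubs with out-of-line primitives that are declared extern here and presumably defined elsewhere in the arch/ppc sources of 2.1.79. The sketch below is a minimal, user-space C model of what those primitives do, inferred only from the fields visible in this header (klock_info.kernel_flag, klock_info.akp, task->lock_depth). The field layout, the simplified task_struct, the NO_PROC_ID value, and the atomic test-and-set loop are assumptions made for illustration; the kernel's real implementation also cooperates with __cli()/__sti() and the global interrupt lock, which this model omits.

/*
 * Hypothetical model of the out-of-line kernel-lock primitives
 * declared in the header above.  Field names follow the patch;
 * everything else is assumed for illustration.
 */
#define NO_PROC_ID 0xff

struct klock_info_struct {
	unsigned char kernel_flag;	/* non-zero while the lock is held      */
	unsigned char akp;		/* CPU id of the lock owner ("akp")     */
};

struct task_struct {
	int lock_depth;			/* kernel-lock recursion count */
	int processor;			/* CPU this task runs on       */
};

static struct klock_info_struct klock_info = { 0, NO_PROC_ID };

/* Take the global kernel lock; nested calls only bump the depth. */
void __lock_kernel(struct task_struct *task)
{
	if (++task->lock_depth > 1)
		return;				/* already owned by this task */
	while (__atomic_exchange_n(&klock_info.kernel_flag, 1,
				   __ATOMIC_ACQUIRE))
		;				/* spin until the lock is free */
	klock_info.akp = task->processor;
}

/* Drop one nesting level; really release on the outermost unlock. */
void __unlock_kernel(struct task_struct *task)
{
	if (--task->lock_depth > 0)
		return;
	klock_info.akp = NO_PROC_ID;
	__atomic_store_n(&klock_info.kernel_flag, 0, __ATOMIC_RELEASE);
}

/* Counterpart of release_kernel_lock() in the header: after a
 * reschedule, re-take the lock and restore the saved depth.
 * The removed macro additionally wrapped this in __cli()/__sti(). */
void reacquire_kernel_lock(struct task_struct *task, int cpu, int depth)
{
	(void)cpu;
	if (depth) {
		__lock_kernel(task);
		task->lock_depth = depth;
	}
}

As the new lock_kernel()/unlock_kernel() inlines in the patch show, the real callers always pass current, so the depth bookkeeping is per task rather than per CPU.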
 
 
