patch-2.4.2 linux/include/asm-s390x/smplock.h
- Lines: 63
- Date: Tue Feb 13 14:13:44 2001
- Orig file: v2.4.1/linux/include/asm-s390x/smplock.h
- Orig date: Wed Dec 31 16:00:00 1969
diff -u --recursive --new-file v2.4.1/linux/include/asm-s390x/smplock.h linux/include/asm-s390x/smplock.h
@@ -0,0 +1,62 @@
+/*
+ * include/asm-s390x/smplock.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/smplock.h"
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked() spin_is_locked(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
+
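To make the depth-counting scheme in this new header easier to follow, here is a small user-space C sketch of the same idea. The struct task, the pthread mutex standing in for kernel_flag, and the schedule_point() helper are stand-ins invented for this example, not kernel interfaces, and the interrupt-lock handling done by release_kernel_lock() (release_irqlock()/__sti()) is omitted.

/*
 * User-space model of the big kernel lock routines added above.
 * Compile with: cc -pthread bkl_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;

struct task {
	int lock_depth;		/* -1 = lock not held, >= 0 = held (possibly nested) */
};

/* Mirrors lock_kernel(): only the outermost call actually takes the lock. */
static void lock_kernel(struct task *t)
{
	if (!++t->lock_depth)
		pthread_mutex_lock(&kernel_flag);
}

/* Mirrors unlock_kernel(): only the outermost call releases it. */
static void unlock_kernel(struct task *t)
{
	if (--t->lock_depth < 0)
		pthread_mutex_unlock(&kernel_flag);
}

/*
 * Mirrors release_kernel_lock()/reacquire_kernel_lock(): around a blocking
 * point the lock is dropped and later re-taken, while lock_depth itself is
 * left untouched so the task's nesting level is preserved.
 */
static void schedule_point(struct task *t)
{
	if (t->lock_depth >= 0)
		pthread_mutex_unlock(&kernel_flag);
	/* ... another task could run and take the lock here ... */
	if (t->lock_depth >= 0)
		pthread_mutex_lock(&kernel_flag);
}

int main(void)
{
	struct task t = { .lock_depth = -1 };

	lock_kernel(&t);	/* depth 0: mutex actually taken    */
	lock_kernel(&t);	/* depth 1: nested, no second lock  */
	printf("depth after nested lock: %d\n", t.lock_depth);

	schedule_point(&t);	/* lock dropped and re-taken, depth unchanged */

	unlock_kernel(&t);	/* depth 0: still held              */
	unlock_kernel(&t);	/* depth -1: mutex released         */
	printf("depth after unlock: %d\n", t.lock_depth);
	return 0;
}

The reason lock_depth lives in the task structure is that lock_kernel()/unlock_kernel() may nest freely within one task, while the scheduler can transparently drop and re-take the spinlock across a context switch without the task ever noticing.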