patch-2.1.4 linux/include/asm-alpha/page.h


diff -u --recursive --new-file v2.1.3/linux/include/asm-alpha/page.h linux/include/asm-alpha/page.h
@@ -10,6 +10,65 @@
 
 #define STRICT_MM_TYPECHECKS
 
+/*
+ * A _lot_ of the kernel time is spent clearing pages, so
+ * do this as fast as we possibly can. Also, doing this
+ * as a separate inline function (rather than memset())
+ * results in clearer kernel profiles as we see _who_ is
+ * doing page clearing or copying.
+ */
+static inline void clear_page(unsigned long page)
+{
+	unsigned long count;
+	__asm__ __volatile__(
+		".align 4\n"
+		"1:\n\t"
+		"stq $31,0(%1)\n\t"
+		"stq $31,8(%1)\n\t"
+		"stq $31,16(%1)\n\t"
+		"stq $31,24(%1)\n\t"
+		"subq %0,1,%0\n\t"
+		"stq $31,32(%1)\n\t"
+		"stq $31,40(%1)\n\t"
+		"stq $31,48(%1)\n\t"
+		"stq $31,56(%1)\n\t"
+		"addq %1,64,%1\n\t"
+		"bne %0,1b"
+		:"=r" (count),"=r" (page)
+		:"0" (PAGE_SIZE/64), "1" (page));
+}
+
+static inline void copy_page(unsigned long to, unsigned long from)
+{
+	unsigned long count;
+	__asm__ __volatile__(
+		".align 4\n"
+		"1:\n\t"
+		"ldq $0,0(%1)\n\t"
+		"ldq $1,8(%1)\n\t"
+		"ldq $2,16(%1)\n\t"
+		"ldq $3,24(%1)\n\t"
+		"ldq $4,32(%1)\n\t"
+		"ldq $5,40(%1)\n\t"
+		"ldq $6,48(%1)\n\t"
+		"ldq $7,56(%1)\n\t"
+		"subq %0,1,%0\n\t"
+		"addq %1,64,%1\n\t"
+		"stq $0,0(%2)\n\t"
+		"stq $1,8(%2)\n\t"
+		"stq $2,16(%2)\n\t"
+		"stq $3,24(%2)\n\t"
+		"stq $4,32(%2)\n\t"
+		"stq $5,40(%2)\n\t"
+		"stq $6,48(%2)\n\t"
+		"stq $7,56(%2)\n\t"
+		"addq %2,64,%2\n\t"
+		"bne %0,1b"
+		:"=r" (count), "=r" (from), "=r" (to)
+		:"0" (PAGE_SIZE/64), "1" (from), "2" (to)
+		:"$0","$1","$2","$3","$4","$5","$6","$7");
+}
+
 #ifdef STRICT_MM_TYPECHECKS
 /*
  * These are used to make use of C type-checking..
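
The two routines added above follow the same pattern: walk the page in
64-byte strides, PAGE_SIZE/64 iterations in all, with $31 (the Alpha
zero register) supplying the zeros in clear_page and $0-$7 serving as an
eight-quadword staging buffer in copy_page.  As a rough guide to what
the assembly does, here is an illustrative plain-C sketch; the names
clear_page_c and copy_page_c are hypothetical and are not part of the
patch or of the kernel headers.

	/* Illustrative C equivalent of the inline assembly above: each
	 * iteration handles eight 64-bit words (64 bytes), and the loop
	 * runs PAGE_SIZE/64 times, just like the unrolled asm loops. */
	static inline void clear_page_c(unsigned long page)
	{
		unsigned long *p = (unsigned long *) page;
		unsigned long count = PAGE_SIZE / 64;

		do {
			p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;	/* stq $31,... */
			p[4] = 0; p[5] = 0; p[6] = 0; p[7] = 0;
			p += 8;					/* advance by 64 bytes */
		} while (--count);				/* subq + bne */
	}

	static inline void copy_page_c(unsigned long to, unsigned long from)
	{
		unsigned long *d = (unsigned long *) to;
		unsigned long *s = (unsigned long *) from;
		unsigned long count = PAGE_SIZE / 64;

		do {
			/* eight ldq into $0..$7, then eight stq back out */
			d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
			d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
			s += 8;
			d += 8;
		} while (--count);
	}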
