patch-2.4.10 linux/arch/sparc64/kernel/dtlb_backend.S
- Lines: 86
- Date: Thu Sep 20 14:11:57 2001
- Orig file: v2.4.9/linux/arch/sparc64/kernel/dtlb_backend.S
- Orig date: Mon Aug 27 12:41:40 2001
diff -u --recursive --new-file v2.4.9/linux/arch/sparc64/kernel/dtlb_backend.S linux/arch/sparc64/kernel/dtlb_backend.S
@@ -1,4 +1,4 @@
-/* $Id: dtlb_backend.S,v 1.12 2001/08/13 20:41:54 kanoj Exp $
+/* $Id: dtlb_backend.S,v 1.14 2001/09/07 18:26:17 kanoj Exp $
* dtlb_backend.S: Back end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
@@ -6,11 +6,27 @@
* Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
-#define TAG_CONTEXT_BITS 0x3ff
+#include <asm/pgtable.h>
+
+#if PAGE_SHIFT == 13
+#define FILL_VALID_SZ_BITS1(r1) \
+ sllx %g2, 62, r1
+#define FILL_VALID_SZ_BITS2(r1)
+#define FILL_VALID_SZ_BITS_NOP nop
+#else /* PAGE_SHIFT */
+#define FILL_VALID_SZ_BITS1(r1) \
+ or %g0, 5, r1
+#define FILL_VALID_SZ_BITS2(r1) \
+ sllx r1, 61, r1
+#define FILL_VALID_SZ_BITS_NOP
+#endif /* PAGE_SHIFT */
+
+#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
#define VPTE_SHIFT (PAGE_SHIFT - 3)
-#define PMD_SHIFT (23 - PAGE_SHIFT + 3)
-#define PGD_SHIFT (34 - PAGE_SHIFT + 3)
-#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P)
+#define TLB_PMD_SHIFT (PAGE_SHIFT - 3 + 3)
+#define TLB_PGD_SHIFT (PMD_BITS + PAGE_SHIFT - 3 + 3)
+#define TLB_PMD_MASK (((1 << PMD_BITS) - 1) << 1)
+#define TLB_PGD_MASK (((1 << (VA_BITS - PAGE_SHIFT - (PAGE_SHIFT - 3) - PMD_BITS)) - 1) << 2)
/* Ways we can get here:
*
@@ -31,13 +47,13 @@
/* TLB1 ** ICACHE line 2: Quick VPTE miss */
ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching?
- srlx %g6, (PMD_SHIFT - 1), %g1 ! Position PMD offset
+ srlx %g6, (TLB_PMD_SHIFT - 1), %g1 ! Position PMD offset
be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus?
- and %g1, 0xffe, %g1 ! Mask PMD offset bits
+ and %g1, TLB_PMD_MASK, %g1 ! Mask PMD offset bits
brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke
add %g1, %g1, %g1 ! Position PMD offset some more
- srlx %g6, (PGD_SHIFT - 2), %g5 ! Position PGD offset
- and %g5, 0xffc, %g5 ! Mask PGD offset
+ srlx %g6, (TLB_PGD_SHIFT - 2), %g5 ! Position PGD offset
+ and %g5, TLB_PGD_MASK, %g5 ! Mask PGD offset
/* TLB1 ** ICACHE line 3: Quick VPTE miss */
lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
@@ -48,7 +64,8 @@
lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
sllx %g5, 11, %g5 ! Shift into place
brz,pn %g5, vpte_noent ! Valid?
- sllx %g2, 62, %g1 ! Put _PAGE_VALID into %g1
+ FILL_VALID_SZ_BITS1(%g1) ! Put _PAGE_VALID into %g1
+ FILL_VALID_SZ_BITS2(%g1) ! Put _PAGE_VALID into %g1
or %g5, VPTE_BITS, %g5 ! Prepare VPTE data
/* TLB1 ** ICACHE line 4: Quick VPTE miss */
@@ -59,10 +76,15 @@
retry ! Load PTE once again
nop
nop
- nop
+ FILL_VALID_SZ_BITS_NOP
-#undef TAG_CONTEXT_BITS
#undef VPTE_SHIFT
-#undef PMD_SHIFT
-#undef PGD_SHIFT
+#undef TLB_PMD_SHIFT
+#undef TLB_PGD_SHIFT
#undef VPTE_BITS
+#undef TLB_PMD_MASK
+#undef TLB_PGD_MASK
+#undef FILL_VALID_SZ_BITS1
+#undef FILL_VALID_SZ_BITS2
+#undef FILL_VALID_SZ_BITS_NOP
+
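
A note on the new page-table macros, for readers checking the arithmetic: the short C sketch below (not part of the patch) plugs in the usual 8K-page configuration and confirms that the parameterised TLB_PMD_SHIFT/TLB_PGD_SHIFT and TLB_PMD_MASK/TLB_PGD_MASK expressions reproduce the old hard-coded PMD_SHIFT/PGD_SHIFT values and the literal 0xffe/0xffc masks removed from the assembly above. The PMD_BITS and VA_BITS values used here (11 and 44) are assumptions standing in for the real definitions in <asm/pgtable.h>.

/* Sketch only -- checks the new macro arithmetic against the old constants.
 * PMD_BITS and VA_BITS are assumed values standing in for <asm/pgtable.h>. */
#include <assert.h>

#define PAGE_SHIFT 13   /* 8K pages */
#define PMD_BITS   11   /* assumption for this sketch */
#define VA_BITS    44   /* assumption for this sketch */

/* New macros, as added by this patch */
#define TLB_PMD_SHIFT (PAGE_SHIFT - 3 + 3)
#define TLB_PGD_SHIFT (PMD_BITS + PAGE_SHIFT - 3 + 3)
#define TLB_PMD_MASK  (((1 << PMD_BITS) - 1) << 1)
#define TLB_PGD_MASK  (((1 << (VA_BITS - PAGE_SHIFT - (PAGE_SHIFT - 3) - PMD_BITS)) - 1) << 2)

/* Old macros, as removed by this patch */
#define OLD_PMD_SHIFT (23 - PAGE_SHIFT + 3)
#define OLD_PGD_SHIFT (34 - PAGE_SHIFT + 3)

int main(void)
{
	assert(TLB_PMD_SHIFT == OLD_PMD_SHIFT);  /* both 13 */
	assert(TLB_PGD_SHIFT == OLD_PGD_SHIFT);  /* both 24 */
	assert(TLB_PMD_MASK == 0xffe);           /* old literal PMD offset mask */
	assert(TLB_PGD_MASK == 0xffc);           /* old literal PGD offset mask */
	return 0;
}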
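
On the new FILL_VALID_SZ_BITS helpers: with 8K pages the handler still builds _PAGE_VALID with a single "sllx %g2, 62", relying on the value the trap code already holds in %g2, as before. The non-8K branch instead materialises the constant itself with "or %g0, 5" followed by "sllx ..., 61". On UltraSPARC the TTE data word keeps the valid bit in bit 63 and the page-size field in bits 62:61 (encoding 01 selects 64K pages), so 5 << 61 sets both at once; because that path takes two instructions where the 8K path takes one, the trailing nop in the last I-cache line becomes FILL_VALID_SZ_BITS_NOP, keeping the instruction count constant. A tiny C sketch of that constant, for illustration only:

/* Sketch only -- the constant produced by the non-8K FILL_VALID_SZ_BITS
 * sequence ("or %g0, 5" then "sllx ..., 61"). */
#include <stdio.h>

int main(void)
{
	unsigned long long tte_hi = 5ULL << 61;     /* as the two instructions build it */
	unsigned long long valid  = 1ULL << 63;     /* TTE valid bit */
	unsigned long long sz_64k = 1ULL << 61;     /* TTE size field 01 = 64K */

	printf("%#llx\n", tte_hi);                  /* 0xa000000000000000 */
	printf("%d\n", tte_hi == (valid | sz_64k)); /* 1: valid + 64K size in one word */
	return 0;
}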