patch-2.1.34 linux/arch/sparc64/kernel/dtlb_prot.S

diff -u --recursive --new-file v2.1.33/linux/arch/sparc64/kernel/dtlb_prot.S linux/arch/sparc64/kernel/dtlb_prot.S
@@ -1,62 +1,56 @@
-/* $Id: dtlb_prot.S,v 1.5 1997/02/26 11:09:26 jj Exp $
- * dtlb_prot.S:	Fast TLB protection trap processing.
+/* $Id: dtlb_prot.S,v 1.10 1997/03/25 09:47:13 davem Exp $
+ * dtlb_prot.S:	Data TLB protection code; it is included directly
+ *              into the trap table.
  *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
-	/* We are in the MMU globals; %g7 contains the physical
-	 * address of current->mm->pgd at all times.  %g2 is
-	 * also preloaded with the mask 0x1ff8 to make things
-	 * even quicker.
-	 *
-	 * Many subtle things are done here.  The high bits of
-	 * the virtual address missed are most easily obtained
-	 * from the tag target (it is at address zero in ASI_DMMU
-	 * so no address formation is necessary to get at this).
-	 * This is used to compute the pgd and pmd table offsets.
-	 *
-	 * Even more clever is that physical page zero is always
-	 * a page full of zeroes.  This means we can just follow
-	 * through with all the page table traversals even if nothing
-	 * is mapped because we'll just do loads from page zero
-	 * and get yet another zero.  We only need to do the check
-	 * for the valid bit being set in the final pte we obtain.
-	 *
-	 * Furthermore, we set the TSB base register to the address
-	 * zero, and we use the 8KB tsb ptr to calculate the pte
-	 * offset.  Again it is at address zero in ASI_DMMU_TSB_8KB_PTR
-	 * so no address formation is necessary, which saves more instructions.
-	 *
-	 * We use physical address accesses to get at the page
-	 * tables, and this is for two reasons.  First, it makes it
-	 * impossible to take a fault while we are servicing the
-	 * miss.  Second, the physical bypass access only allocates
-	 * in the E-cache, and thus we prevent D-cache pollution
-	 * from the miss handlers probing the page tables.
+	/* We know the kernel never takes a protection trap;
+	 * this makes this routine a lot easier than it
+	 * would be otherwise.
 	 */
 
-	/* I-cache line 0 */
-	ldxa		[%g0] ASI_DMMU, %g1
-	srlx		%g1, 8, %g3
-	and		%g3, %g2, %g3
-	ldxa		[%g7 + %g3] ASI_PHYS_USE_EC, %g5
-	sllx		%g1, 2, %g4
-	and		%g4, %g2, %g3
-	ldxa		[%g5 + %g3] ASI_PHYS_USE_EC, %g4
-	ldxa		[%g0] ASI_DMMU_TSB_8KB_PTR, %g1
-	/* I-cache line 1 */
-	srlx		%g1, 1, %g1
-	ldxa		[%g4 + %g1] ASI_PHYS_USE_EC, %g3
-	andcc		%g3, _PAGE_WRITE, %g0
-	be,pn		%xcc, sparc64_dtlb_fault
-	 or		%g3, (_PAGE_WRITE|_PAGE_W|_PAGE_MODIFIED|_PAGE_ACCESSED), %g3
-
-	/* Blamo... */
-	stxa		%g3, [%g4 + %g1] ASI_DMMU
-	stxa		%g3, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-	/* I-cache line 2 */
-	nop; nop; nop; nop; nop; nop; nop; nop;
-	/* I-cache line 3 */
-	nop; nop; nop; nop; nop; nop; nop; nop;
+#define MODIFIED_BITS	(_PAGE_WRITE | _PAGE_W | _PAGE_MODIFIED | _PAGE_ACCESSED)
+
+				/* ICACHE line 1 */
+  /*0x00*/	ldxa		[%g0] ASI_DMMU, %g1		! Get TAG_TARGET
+  /*0x04*/	srlx		%g1, 8, %g3			! Position PGD offset
+  /*0x08*/	sllx		%g1, 2, %g4			! Position PMD offset
+  /*0x0c*/	and		%g3, %g2, %g3			! Mask PGD offset
+  /*0x10*/	and		%g4, %g2, %g4			! Mask PMD offset
+  /*0x14*/	ldxa		[%g7 + %g3] ASI_PHYS_USE_EC, %g5	! Load PGD
+  /*0x18*/	ldxa		[%g5 + %g4] ASI_PHYS_USE_EC, %g4	! Load PMD
+  /*0x1c*/	ldxa		[%g0] ASI_DMMU_TSB_8KB_PTR, %g1		! For PTE offset
+
+				/* ICACHE line 2 */
+  /*0x20*/	srlx		%g1, 1, %g1				! PTE offset
+  /*0x24*/	ldxa		[%g4 + %g1] ASI_PHYS_USE_EC, %g3	! Load PTE
+  /*0x28*/	andcc		%g3, _PAGE_WRITE, %g0			! Writable?
+  /*0x2c*/	be,pn		%xcc, sparc64_dtlb_fault		! Nope...
+  /*0x30*/	 or		%g3, (MODIFIED_BITS), %g3		! Yes it is
+  /*0x34*/	mov		TLB_TAG_ACCESS, %g5			! Get the page
+  /*0x38*/	ldxa		[%g5] ASI_DMMU, %g5			! From MMU
+  /*0x3c*/	add		%g2, 7, %g6				! Compute mask
+
+				/* ICACHE line 3 */
+  /*0x40*/	andn		%g5, %g6, %g5				! Mask page
+  /*0x44*/	or		%g5, 0x10, %g5				! 2ndary Context
+  /*0x48*/	stxa		%g0, [%g5] ASI_DMMU_DEMAP		! TLB flush page
+  /*0x4c*/	membar		#Sync					! Synchronize
+  /*0x50*/	stxa		%g3, [%g4 + %g1] ASI_PHYS_USE_EC	! Update sw PTE
+  /*0x54*/	stxa		%g3, [%g0] ASI_DTLB_DATA_IN		! TLB load
+  /*0x58*/	retry							! Trap return
+  /*0x5c*/	nop
+
+				/* ICACHE line 4 */
+  /*0x60*/	nop
+  /*0x64*/	nop
+  /*0x68*/	nop
+  /*0x6c*/	nop
+  /*0x70*/	nop
+  /*0x74*/	nop
+  /*0x78*/	nop
+  /*0x7c*/	nop
+
+#undef MODIFIED_BITS
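
The "Position PGD offset" / "Position PMD offset" arithmetic above is terse,
so here is the idea in C.  The D-MMU tag target register (read with ldxa from
address zero in ASI_DMMU) holds the context number in its high bits and
VA[63:22] of the faulting address in its low bits, and %g2 is preloaded with
0x1ff8, the byte offset of an 8-byte table entry within an 8KB page.  What
follows is a minimal sketch, not kernel code, assuming that register layout
and the 8KB-page, three-level table scheme used here:

#include <stdint.h>

#define OFFSET_MASK	0x1ff8UL  /* %g2: 8-byte entry offset in an 8KB page */

/* tag_target is the raw ldxa [%g0] ASI_DMMU value.  The mask throws
 * away the context bits along with everything outside the 10-bit index. */
static uint64_t pgd_offset(uint64_t tag_target)
{
	return (tag_target >> 8) & OFFSET_MASK;	/* srlx 8 + and: VA[42:33] * 8 */
}

static uint64_t pmd_offset(uint64_t tag_target)
{
	return (tag_target << 2) & OFFSET_MASK;	/* sllx 2 + and: VA[32:23] * 8 */
}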

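The rest of the fast path, rendered as rough C under the same assumptions and
reusing pgd_offset()/pmd_offset() from the sketch above.  Here phys_load and
phys_store stand in for the ldxa/stxa ASI_PHYS_USE_EC accesses,
dtlb_demap_page for the ASI_DMMU_DEMAP store, and dtlb_load for the
ASI_DTLB_DATA_IN store; these helper names are invented for illustration,
not kernel interfaces, and the _PAGE_* bits are the kernel's own from
<asm/pgtable.h>:

extern uint64_t phys_load(uint64_t paddr);
extern void phys_store(uint64_t paddr, uint64_t val);
extern void dtlb_demap_page(uint64_t demap_addr);
extern void dtlb_load(uint64_t pte_data);
extern void sparc64_dtlb_fault(void);

#define MODIFIED_BITS (_PAGE_WRITE | _PAGE_W | _PAGE_MODIFIED | _PAGE_ACCESSED)

void dtlb_prot_fast_path(uint64_t pgd_phys,	/* %g7 */
			 uint64_t tag_target,	/* [%g0] ASI_DMMU */
			 uint64_t tsb_8kb_ptr,	/* [%g0] ASI_DMMU_TSB_8KB_PTR */
			 uint64_t tag_access)	/* [TLB_TAG_ACCESS] ASI_DMMU */
{
	/* Upper-level entries hold the physical address of the next table,
	 * so each loaded value is used directly as the next base. */
	uint64_t pgd_entry = phys_load(pgd_phys + pgd_offset(tag_target));
	uint64_t pmd_entry = phys_load(pgd_entry + pmd_offset(tag_target));
	uint64_t pte_off   = tsb_8kb_ptr >> 1;	/* 16-byte TSB slot -> 8-byte PTE */
	uint64_t pte       = phys_load(pmd_entry + pte_off);

	if (!(pte & _PAGE_WRITE)) {	/* genuinely read-only: slow path */
		sparc64_dtlb_fault();
		return;
	}
	pte |= MODIFIED_BITS;		/* writable: mark dirty and accessed */

	/* The stale read-only translation must leave the D-TLB before the
	 * new entry goes in; 0x10 selects the secondary context. */
	dtlb_demap_page((tag_access & ~0x1fffUL) | 0x10);
	phys_store(pmd_entry + pte_off, pte);	/* update the in-memory PTE */
	dtlb_load(pte);				/* load new entry, then retry */
}

Note that the walk never tests the PGD or PMD entries for validity: as the
old comment being removed above explains, physical page zero is kept full of
zeroes, so an unmapped upper-level entry simply steers the loads through page
zero, and the single _PAGE_WRITE test at the end catches the failure.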