patch-2.1.44 linux/arch/mips/kernel/r4k_misc.S

diff -u --recursive --new-file v2.1.43/linux/arch/mips/kernel/r4k_misc.S linux/arch/mips/kernel/r4k_misc.S
@@ -0,0 +1,189 @@
+/* $Id: r4k_misc.S,v 1.2 1997/06/12 14:18:10 ralf Exp $
+ * r4k_misc.S: Misc. exception handling code for r4k.
+ *
+ * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
+ *
+ * Multi-cpu abstraction and reworking:
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ */
+#include <asm/asm.h>
+#include <asm/current.h>
+#include <asm/offset.h>
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/current.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsconfig.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+#define NOTLB_OPTIMIZE /* If you are paranoid, define this. */
+
+	/* ABUSE of CPP macros 101. */
+
+	/* After this macro runs, the pte faulted on is
+	 * in register PTE, and a pointer to its slot in
+	 * the page table is in PTR.
+	 */
+#define LOAD_PTE(pte, ptr) \
+	mfc0	pte, CP0_BADVADDR; \
+	srl	pte, pte, 22; \
+	_GET_CURRENT(ptr); \
+	sll	pte, pte, 2; \
+	lw	ptr, THREAD_PGDIR(ptr); \
+	addu	ptr, pte, ptr; \
+	mfc0	pte, CP0_BADVADDR; \
+	lw	ptr, (ptr); \
+	srl	pte, pte, 10; \
+	and	pte, pte, 0xffc; \
+	addu	ptr, ptr, pte; \
+	lw	pte, (ptr);
+
+	/* This places the even/odd pte pair in the page
+	 * table at PTR into ENTRYLO0 and ENTRYLO1 using
+	 * TMP as a scratch register.
+	 */
+#define PTE_RELOAD(ptr, tmp) \
+	ori	ptr, ptr, 0x4; \
+	xori	ptr, ptr, 0x4; \
+	lw	tmp, 4(ptr); \
+	lw	ptr, 0(ptr); \
+	srl	tmp, tmp, 6; \
+	mtc0	tmp, CP0_ENTRYLO1; \
+	srl	ptr, ptr, 6; \
+	mtc0	ptr, CP0_ENTRYLO0;
+
+#define DO_FAULT(write) \
+	.set	noat; \
+	.set	macro; \
+	SAVE_ALL; \
+	mfc0	a2, CP0_BADVADDR; \
+	STI; \
+	.set	at; \
+	move	a0, sp; \
+	jal	do_page_fault; \
+	 li	a1, write; \
+	j	ret_from_sys_call; \
+	 nop; \
+	.set	noat; \
+	.set	nomacro;
+
+	/* Check if PTE is present; if not, jump to LABEL.
+	 * PTR points to the page table where this PTE is located;
+	 * when the macro is done executing, PTE will be restored
+	 * with its original value.
+	 */
+#define PTE_PRESENT(pte, ptr, label) \
+	andi	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
+	xori	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
+	bnez	pte, label; \
+	 lw	pte, (ptr);
+
+	/* Make PTE valid, store result at PTR. */
+#define PTE_MAKEVALID(pte, ptr) \
+	ori	pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
+	sw	pte, (ptr);
+
+	/* Check if PTE can be written to; if not, branch to LABEL.
+	 * Either way, restore PTE with the value from PTR when done.
+	 */
+#define PTE_WRITABLE(pte, ptr, label) \
+	andi	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
+	xori	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
+	bnez	pte, label; \
+	 lw	pte, (ptr);
+
+	/* Make PTE writable, update software status bits as well,
+	 * then store at PTR.
+	 */
+#define PTE_MAKEWRITE(pte, ptr) \
+	ori	pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
+			   _PAGE_VALID | _PAGE_DIRTY); \
+	sw	pte, (ptr);
+
+	.set	noreorder
+	.set	mips3
+
+	/* Note: on many R4k variants, tlb probes cannot be executed out
+	 * of the instruction cache, else you get bogus results.
+	 */
+
+	.align	5
+	NESTED(r4k_handle_tlbl, PT_SIZE, sp)
+	.set	noat
+	.set	nomacro
+invalid_tlbl:
+#ifndef NOTLB_OPTIMIZE
+	/* Test present bit in entry. */
+	LOAD_PTE(k0, k1)
+	tlbp
+	PTE_PRESENT(k0, k1, nopage_tlbl)
+	PTE_MAKEVALID(k0, k1)
+	PTE_RELOAD(k1, k0)
+	nop
+	b	1f
+	 tlbwi
+1:
+	nop
+	eret
+#endif
+
+nopage_tlbl:
+	DO_FAULT(0)
+	END(r4k_handle_tlbl)
+
+	.align	5
+	NESTED(r4k_handle_tlbs, PT_SIZE, sp)
+	.set	noat
+#ifndef NOTLB_OPTIMIZE
+	LOAD_PTE(k0, k1)
+	tlbp				# find faulting entry
+	PTE_WRITABLE(k0, k1, nopage_tlbs)
+	PTE_MAKEWRITE(k0, k1)
+	PTE_RELOAD(k1, k0)
+	nop
+	b	1f
+	 tlbwi
+1:
+	nop
+	eret
+#endif
+
+nopage_tlbs:
+	DO_FAULT(1)
+	END(r4k_handle_tlbs)
+
+	.align	5
+	NESTED(r4k_handle_mod, PT_SIZE, sp)
+	.set	noat
+#ifndef NOTLB_OPTIMIZE
+	LOAD_PTE(k0, k1)
+	tlbp					# find faulting entry
+	andi	k0, k0, _PAGE_WRITE
+	beqz	k0, nowrite_mod
+	 lw	k0, (k1)
+
+	/* Present and writable bits set, set accessed and dirty bits. */
+	PTE_MAKEWRITE(k0, k1)
+#if 0
+	ori	k0, k0, (_PAGE_ACCESSED | _PAGE_DIRTY)
+	sw	k0, (k1)
+#endif
+
+	/* Now reload the entry into the tlb. */
+	PTE_RELOAD(k1, k0)
+	nop
+	b	1f
+	 tlbwi
+1:
+	nop
+	eret
+#endif
+
+nowrite_mod:
+	DO_FAULT(1)
+	END(r4k_handle_mod)
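
For readers who don't speak MIPS assembler fluently, the LOAD_PTE macro above is
a two-level page-table walk: the top ten bits of BadVAddr index the current
process's page directory (fetched via THREAD_PGDIR), and the next ten bits index
the page table that the directory entry points to. A rough C rendering of that
walk might look like the sketch below; the function name, argument list, and
types are made up for illustration and are not part of the patch.

	/* Hypothetical sketch only: the real walk is done in assembler by
	 * LOAD_PTE, which also leaves the pte value itself in a register. */
	static unsigned long *load_pte_slot(unsigned long badvaddr,
					    unsigned long *pgd)
	{
		/* Top 10 bits of the faulting address index the page directory. */
		unsigned long *pte_table = (unsigned long *) pgd[badvaddr >> 22];

		/* The next 10 bits index the page table; the assembler's
		 * (badvaddr >> 10) & 0xffc is the same offset in bytes. */
		return &pte_table[(badvaddr >> 12) & 0x3ff];
	}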
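
The PTE_PRESENT/PTE_MAKEVALID and PTE_WRITABLE/PTE_MAKEWRITE pairs are the
delay-slot-friendly assembler form of a simple check-and-update. The sketch
below shows roughly the same logic in C; the helper name and the flag bit
positions are placeholders (the real definitions live in <asm/pgtable.h>), and
only the flag names and the overall flow come from the patch.

	/* Illustrative bit values -- not the kernel's actual layout. */
	#define _PAGE_PRESENT	(1 << 0)
	#define _PAGE_READ	(1 << 1)
	#define _PAGE_WRITE	(1 << 2)
	#define _PAGE_ACCESSED	(1 << 3)
	#define _PAGE_MODIFIED	(1 << 4)
	#define _PAGE_VALID	(1 << 6)
	#define _PAGE_DIRTY	(1 << 7)

	/* Returns 0 if the pte could be fixed up in place, nonzero if the
	 * handler must fall back to DO_FAULT, i.e. do_page_fault(regs,
	 * write, badvaddr), as the nopage_tlbl/nopage_tlbs/nowrite_mod
	 * paths above do. */
	static int pte_fixup(unsigned long *ptep, int write)
	{
		unsigned long pte = *ptep;
		unsigned long need = _PAGE_PRESENT |
				     (write ? _PAGE_WRITE : _PAGE_READ);

		if ((pte & need) != need)
			return 1;		/* take the slow path */

		if (write)			/* PTE_MAKEWRITE */
			pte |= _PAGE_ACCESSED | _PAGE_MODIFIED |
			       _PAGE_VALID | _PAGE_DIRTY;
		else				/* PTE_MAKEVALID */
			pte |= _PAGE_VALID | _PAGE_ACCESSED;

		*ptep = pte;
		return 0;
	}

The one step with no C equivalent is PTE_RELOAD plus tlbwi: the ori/xori pair
clears bit 2 of the pte pointer so the even/odd pte pair is loaded as a unit,
each half is shifted right by 6 to drop the low software bits before being
written to EntryLo0/EntryLo1, and tlbwi then rewrites whichever TLB slot the
earlier tlbp found.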
