patch-2.4.19 linux-2.4.19/arch/mips64/mm/r4xx0.c
- Lines: 931
- Date: Fri Aug 2 17:39:43 2002
- Orig file: linux-2.4.18/arch/mips64/mm/r4xx0.c
- Orig date: Sun Sep 30 12:26:08 2001
diff -urN linux-2.4.18/arch/mips64/mm/r4xx0.c linux-2.4.19/arch/mips64/mm/r4xx0.c
@@ -9,6 +9,7 @@
* Copyright (C) 1997, 1998, 1999, 2000, 2001 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -20,8 +21,8 @@
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
-#include <asm/sgialib.h>
#include <asm/mmu_context.h>
+#include <asm/cpu.h>
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
@@ -40,6 +41,7 @@
#undef DEBUG_CACHE
+
/*
* Dummy cache handling routines for machines without boardcaches
*/
@@ -95,9 +97,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
- :"$1", "memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
+ : "memory");
}
static void r4k_clear_page_d32(void * page)
@@ -120,9 +122,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
- :"$1", "memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
+ : "memory");
}
@@ -180,11 +182,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page),
- "I" (PAGE_SIZE),
- "i" (Create_Dirty_Excl_D)
- :"$1", "memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
+ : "memory");
}
/*
@@ -214,9 +214,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
- :"$1", "memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_D)
+ : "memory");
__restore_flags(flags);
}
@@ -251,9 +251,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
- :"$1","memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
+ : "memory");
}
static void r4k_clear_page_s32(void * page)
@@ -276,9 +276,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
- :"$1","memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
+ : "memory");
}
static void r4k_clear_page_s64(void * page)
@@ -300,11 +300,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page),
- "I" (PAGE_SIZE),
- "i" (Create_Dirty_Excl_SD)
- :"$1","memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
+ : "memory");
}
static void r4k_clear_page_s128(void * page)
@@ -334,11 +332,9 @@
"sd\t$0,-8(%0)\n\t"
".set\tat\n\t"
".set\treorder"
- :"=r" (page)
- :"0" (page),
- "I" (PAGE_SIZE),
- "i" (Create_Dirty_Excl_SD)
- :"$1", "memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE), "i" (Create_Dirty_Excl_SD)
+ : "memory");
}
@@ -697,83 +693,47 @@
static inline void r4k_flush_cache_all_s16d16i16(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache16();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s32d16i16(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache32();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s64d16i16(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache64();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s128d16i16(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache128();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s32d32i32(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache32();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s64d32i32(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache64();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s128d32i32(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache128();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_d16i16(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16(); blast_icache16();
- __restore_flags(flags);
}
static inline void r4k_flush_cache_all_d32i32(void)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache32(); blast_icache32();
- __restore_flags(flags);
}
static void r4k_flush_cache_range_s16d16i16(struct mm_struct *mm,
@@ -781,7 +741,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -800,7 +759,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -810,7 +768,6 @@
blast_scache16_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -820,7 +777,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -839,7 +795,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -849,7 +804,6 @@
blast_scache32_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -859,7 +813,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -878,7 +831,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -888,7 +840,6 @@
blast_scache64_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -898,7 +849,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -917,7 +867,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -927,7 +876,6 @@
blast_scache128_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -937,7 +885,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -956,7 +903,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -966,7 +912,6 @@
blast_scache32_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -976,7 +921,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) == 0)
return;
@@ -995,7 +939,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1005,7 +948,6 @@
blast_scache64_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -1015,7 +957,6 @@
unsigned long end)
{
struct vm_area_struct *vma;
- unsigned long flags;
if (CPU_CONTEXT(smp_processor_id(), mm) != 0)
return;
@@ -1034,7 +975,6 @@
pmd_t *pmd;
pte_t *pte;
- __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1044,7 +984,6 @@
blast_scache128_page(start);
start += PAGE_SIZE;
}
- __restore_flags(flags);
}
}
}
@@ -1054,14 +993,10 @@
unsigned long end)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
- unsigned long flags;
-
#ifdef DEBUG_CACHE
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
- __save_and_cli(flags);
blast_dcache16(); blast_icache16();
- __restore_flags(flags);
}
}
@@ -1070,14 +1005,10 @@
unsigned long end)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
- unsigned long flags;
-
#ifdef DEBUG_CACHE
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
- __save_and_cli(flags);
blast_dcache32(); blast_icache32();
- __restore_flags(flags);
}
}
@@ -1180,7 +1111,6 @@
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1195,7 +1125,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1224,14 +1153,12 @@
} else
blast_scache16_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1246,7 +1173,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1274,14 +1200,12 @@
} else
blast_scache32_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1296,7 +1220,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1325,14 +1248,12 @@
} else
blast_scache64_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1347,7 +1268,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1377,14 +1297,12 @@
} else
blast_scache128_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1399,7 +1317,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1430,14 +1347,12 @@
} else
blast_scache32_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1452,7 +1367,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1483,14 +1397,12 @@
} else
blast_scache64_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1505,7 +1417,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1534,14 +1445,12 @@
} else
blast_scache128_page(page);
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1556,7 +1465,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1584,14 +1492,12 @@
blast_dcache16_page_indexed(page);
}
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1606,7 +1512,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1636,14 +1541,12 @@
blast_dcache32_page_indexed(page);
}
out:
- __restore_flags(flags);
}
static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -1658,7 +1561,6 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1688,7 +1590,6 @@
blast_dcache32_page_indexed(page ^ dcache_waybit);
}
out:
- __restore_flags(flags);
}
static void r4k_flush_page_to_ram_s16(struct page *page)
@@ -1713,20 +1614,40 @@
static void r4k_flush_page_to_ram_d16(struct page *page)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache16_page((unsigned long)page_address(page));
- __restore_flags(flags);
}
static void r4k_flush_page_to_ram_d32(struct page *page)
{
- unsigned long flags;
-
- __save_and_cli(flags);
blast_dcache32_page((unsigned long)page_address(page));
- __restore_flags(flags);
+}
+
+static void
+r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ flush_cache_all();
+}
+
+static void
+r4k_flush_icache_page_s(struct vm_area_struct *vma, struct page *page)
+{
+ /*
+ * We did an scache flush therefore PI is already clean.
+ */
+}
+
+/*
+ * Ok, this seriously sucks. We use them to flush a user page but don't
+ * know the virtual address, so we have to blast away the whole icache
+ * which is significantly more expensive than the real thing.
+ */
+static void
+r4k_flush_icache_page_p(struct vm_area_struct *vma, struct page *page)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ flush_cache_all();
}
/*
@@ -1827,7 +1748,7 @@
static void r4k_dma_cache_wback(unsigned long addr, unsigned long size)
{
- panic("r4k_dma_cache called - should not happen.\n");
+ panic("r4k_dma_cache called - should not happen.");
}
/*
@@ -1864,7 +1785,7 @@
#define NTLB_ENTRIES_HALF 24 /* Fixed on all R4XX0 variants... */
-static inline void r4k_flush_tlb_all(void)
+void local_flush_tlb_all(void)
{
unsigned long flags;
unsigned long old_ctx;
@@ -1897,7 +1818,7 @@
__restore_flags(flags);
}
-static void r4k_flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
unsigned long flags;
@@ -1906,14 +1827,14 @@
printk("[tlbmm<%d>]", mm->context);
#endif
__save_and_cli(flags);
- get_new_cpu_mmu_context(mm, smp_processor_id());
+ get_new_mmu_context(mm, smp_processor_id());
if(mm == current->mm)
set_entryhi(CPU_CONTEXT(smp_processor_id(), mm) & 0xff);
__restore_flags(flags);
}
}
-static void r4k_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
@@ -1954,7 +1875,7 @@
}
set_entryhi(oldpid);
} else {
- get_new_cpu_mmu_context(mm, smp_processor_id());
+ get_new_mmu_context(mm, smp_processor_id());
if(mm == current->mm)
set_entryhi(CPU_CONTEXT(smp_processor_id(),
mm) & 0xff);
@@ -1963,7 +1884,7 @@
}
}
-static void r4k_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
if (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) != 0) {
unsigned long flags;
@@ -2086,35 +2007,6 @@
}
#endif
-static void r4k_show_regs(struct pt_regs *regs)
-{
- /* Saved main processor registers. */
- printk("$0 : %016lx %016lx %016lx %016lx\n",
- 0UL, regs->regs[1], regs->regs[2], regs->regs[3]);
- printk("$4 : %016lx %016lx %016lx %016lx\n",
- regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
- printk("$8 : %016lx %016lx %016lx %016lx\n",
- regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
- printk("$12 : %016lx %016lx %016lx %016lx\n",
- regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
- printk("$16 : %016lx %016lx %016lx %016lx\n",
- regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]);
- printk("$20 : %016lx %016lx %016lx %016lx\n",
- regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
- printk("$24 : %016lx %016lx\n",
- regs->regs[24], regs->regs[25]);
- printk("$28 : %016lx %016lx %016lx %016lx\n",
- regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]);
- printk("Hi : %016lx\n", regs->hi);
- printk("Lo : %016lx\n", regs->lo);
-
- /* Saved cp0 registers. */
- printk("epc : %016lx %s\nbadvaddr: %016lx\n",
- regs->cp0_epc, print_tainted(), regs->cp0_badvaddr);
- printk("Status : %08x\nCause : %08x\n",
- (unsigned int) regs->cp0_status, (unsigned int) regs->cp0_cause);
-}
-
/* Detect and size the various r4k caches. */
static void __init probe_icache(unsigned long config)
{
@@ -2224,6 +2116,7 @@
case 16:
_clear_page = r4k_clear_page_d16;
_copy_page = r4k_copy_page_d16;
+ _flush_cache_all = r4k_flush_cache_all_d16i16;
_flush_cache_l1 = r4k_flush_cache_all_d16i16;
_flush_cache_mm = r4k_flush_cache_mm_d16i16;
_flush_cache_range = r4k_flush_cache_range_d16i16;
@@ -2241,6 +2134,7 @@
_clear_page = r4k_clear_page_d32;
_copy_page = r4k_copy_page_d32;
}
+ _flush_cache_all = r4k_flush_cache_all_d32i32;
_flush_cache_l1 = r4k_flush_cache_all_d32i32;
_flush_cache_mm = r4k_flush_cache_mm_d32i32;
_flush_cache_range = r4k_flush_cache_range_d32i32;
@@ -2256,7 +2150,7 @@
_flush_page_to_ram = r4k_flush_page_to_ram_d32;
break;
}
-
+ _flush_icache_page = r4k_flush_icache_page_p;
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_pc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_pc;
@@ -2268,6 +2162,7 @@
case 16:
switch(dc_lsize) {
case 16:
+ _flush_cache_all = r4k_flush_cache_all_s16d16i16;
_flush_cache_l1 = r4k_flush_cache_all_s16d16i16;
_flush_cache_mm = r4k_flush_cache_mm_s16d16i16;
_flush_cache_range = r4k_flush_cache_range_s16d16i16;
@@ -2283,12 +2178,14 @@
case 32:
switch(dc_lsize) {
case 16:
+ _flush_cache_all = r4k_flush_cache_all_s32d16i16;
_flush_cache_l1 = r4k_flush_cache_all_s32d16i16;
_flush_cache_mm = r4k_flush_cache_mm_s32d16i16;
_flush_cache_range = r4k_flush_cache_range_s32d16i16;
_flush_cache_page = r4k_flush_cache_page_s32d16i16;
break;
case 32:
+ _flush_cache_all = r4k_flush_cache_all_s32d32i32;
_flush_cache_l1 = r4k_flush_cache_all_s32d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s32d32i32;
_flush_cache_range = r4k_flush_cache_range_s32d32i32;
@@ -2302,12 +2199,14 @@
case 64:
switch(dc_lsize) {
case 16:
+ _flush_cache_all = r4k_flush_cache_all_s64d16i16;
_flush_cache_l1 = r4k_flush_cache_all_s64d16i16;
_flush_cache_mm = r4k_flush_cache_mm_s64d16i16;
_flush_cache_range = r4k_flush_cache_range_s64d16i16;
_flush_cache_page = r4k_flush_cache_page_s64d16i16;
break;
case 32:
+ _flush_cache_all = r4k_flush_cache_all_s64d32i32;
_flush_cache_l1 = r4k_flush_cache_all_s64d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s64d32i32;
_flush_cache_range = r4k_flush_cache_range_s64d32i32;
@@ -2321,12 +2220,14 @@
case 128:
switch(dc_lsize) {
case 16:
+ _flush_cache_all = r4k_flush_cache_all_s128d16i16;
_flush_cache_l1 = r4k_flush_cache_all_s128d16i16;
_flush_cache_mm = r4k_flush_cache_mm_s128d16i16;
_flush_cache_range = r4k_flush_cache_range_s128d16i16;
_flush_cache_page = r4k_flush_cache_page_s128d16i16;
break;
case 32:
+ _flush_cache_all = r4k_flush_cache_all_s128d32i32;
_flush_cache_l1 = r4k_flush_cache_all_s128d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s128d32i32;
_flush_cache_range = r4k_flush_cache_range_s128d32i32;
@@ -2338,6 +2239,7 @@
_copy_page = r4k_copy_page_s128;
break;
}
+ _flush_icache_page = r4k_flush_icache_page_s;
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_sc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_sc;
@@ -2366,19 +2268,17 @@
{
unsigned long config = read_32bit_cp0_register(CP0_CONFIG);
- printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));
-
#ifdef CONFIG_MIPS_UNCACHED
- set_cp0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+ change_cp0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
#else
- set_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);
+ change_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);
#endif /* UNCACHED */
probe_icache(config);
probe_dcache(config);
setup_scache(config);
- switch(mips_cputype) {
+ switch(mips_cpu.cputype) {
case CPU_R4600: /* QED style two way caches? */
case CPU_R4700:
case CPU_R5000:
@@ -2390,16 +2290,11 @@
if ((read_32bit_cp0_register(CP0_PRID) & 0xfff0) == 0x2020) {
_flush_cache_sigtramp = r4600v20k_flush_cache_sigtramp;
}
+ _flush_icache_range = r4k_flush_icache_range; /* Ouch */
- _flush_tlb_all = r4k_flush_tlb_all;
- _flush_tlb_mm = r4k_flush_tlb_mm;
- _flush_tlb_range = r4k_flush_tlb_range;
- _flush_tlb_page = r4k_flush_tlb_page;
_flush_cache_l2 = r4k_flush_cache_l2;
- update_mmu_cache = r4k_update_mmu_cache;
-
- _show_regs = r4k_show_regs;
+ _update_mmu_cache = r4k_update_mmu_cache;
flush_cache_l1();
@@ -2411,5 +2306,5 @@
* be set for 4kb pages.
*/
write_32bit_cp0_register(CP0_PAGEMASK, PM_4K);
- _flush_tlb_all();
+ local_flush_tlb_all();
}
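
The hunks above extend the cache-routine dispatch table that this file fills in at probe time: alongside the existing _flush_cache_l1, _flush_cache_mm, _flush_cache_range and _flush_cache_page pointers, the setup code now also installs _flush_cache_all and _flush_icache_page for each line-size combination, while the TLB routines are renamed from r4k_flush_tlb_* to plain global local_flush_tlb_* functions instead of being hooked through _flush_tlb_* pointers. As a rough illustration of that init-time function-pointer selection pattern, here is a minimal, self-contained sketch; the identifiers flush_cache_all_sketch, setup_cache_ops and flush_dcache_16/32 are invented for the example and are not part of the kernel interface.

/*
 * Minimal sketch of init-time cache-op selection by detected line size.
 * Illustrative only: the identifiers below are hypothetical, not the
 * kernel's real _flush_* hooks.
 */
#include <stdio.h>

static void flush_dcache_16(void) { puts("blast dcache, 16-byte lines"); }
static void flush_dcache_32(void) { puts("blast dcache, 32-byte lines"); }

/* dispatch slot, analogous in spirit to _flush_cache_all */
static void (*flush_cache_all_sketch)(void);

static void setup_cache_ops(int dc_lsize)
{
	switch (dc_lsize) {
	case 16:
		flush_cache_all_sketch = flush_dcache_16;
		break;
	default:
		flush_cache_all_sketch = flush_dcache_32;
		break;
	}
}

int main(void)
{
	setup_cache_ops(32);		/* a real probe would read the CP0 config */
	flush_cache_all_sketch();	/* generic code just calls through the pointer */
	return 0;
}

Selecting the routine once at setup keeps the per-line-size variants out of the generic flush paths, which is the pattern the new _flush_cache_all and _flush_icache_page assignments follow.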