patch-2.4.7 linux/arch/mips/mm/r4xx0.c
Next file: linux/arch/mips/mm/r5432.c
Previous file: linux/arch/mips/mm/r2300.c
Back to the patch index
Back to the overall index
- Lines: 1299
- Date: Wed Jul 4 11:50:39 2001
- Orig file: v2.4.6/linux/arch/mips/mm/r4xx0.c
- Orig date: Sun Aug 6 11:43:17 2000
diff -u --recursive --new-file v2.4.6/linux/arch/mips/mm/r4xx0.c linux/arch/mips/mm/r4xx0.c
@@ -19,12 +19,13 @@
#include <linux/sched.h>
#include <linux/mm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
#include <asm/bcache.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
-#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
/* CP0 hazard avoidance. */
@@ -208,7 +209,7 @@
{
unsigned int flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
*(volatile unsigned int *)KSEG1;
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -235,7 +236,7 @@
"I" (PAGE_SIZE),
"i" (Create_Dirty_Excl_D)
:"$1","memory");
- restore_flags(flags);
+ __restore_flags(flags);
}
/*
@@ -626,7 +627,7 @@
:"0" (to), "1" (from),
"I" (PAGE_SIZE),
"i" (Create_Dirty_Excl_D));
- restore_flags(flags);
+ __restore_flags(flags);
}
/*
@@ -910,81 +911,81 @@
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache16();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s32d16i16(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache32();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s64d16i16(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache64();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s128d16i16(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16(); blast_scache128();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s32d32i32(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache32();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s64d32i32(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache64();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_s128d32i32(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache32(); blast_icache32(); blast_scache128();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_d16i16(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16();
- restore_flags(flags);
+ __restore_flags(flags);
}
static inline void r4k_flush_cache_all_d32i32(void)
{
unsigned long flags;
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache32(); blast_icache32();
- restore_flags(flags);
+ __restore_flags(flags);
}
static void
@@ -1010,10 +1011,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1023,7 +1022,7 @@
blast_scache16_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1051,10 +1050,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1064,7 +1061,7 @@
blast_scache32_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1091,10 +1088,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1104,7 +1099,7 @@
blast_scache64_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1131,10 +1126,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1144,7 +1137,7 @@
blast_scache128_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1171,10 +1164,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1184,7 +1175,7 @@
blast_scache32_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1211,10 +1202,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1224,7 +1213,7 @@
blast_scache64_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1251,10 +1240,8 @@
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int text;
- save_and_cli(flags);
- text = vma->vm_flags & VM_EXEC;
+ __save_and_cli(flags);
while(start < end) {
pgd = pgd_offset(mm, start);
pmd = pmd_offset(pgd, start);
@@ -1264,7 +1251,7 @@
blast_scache128_page(start);
start += PAGE_SIZE;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
}
@@ -1279,9 +1266,9 @@
#ifdef DEBUG_CACHE
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache16(); blast_icache16();
- restore_flags(flags);
+ __restore_flags(flags);
}
}
@@ -1295,9 +1282,9 @@
#ifdef DEBUG_CACHE
printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
blast_dcache32(); blast_icache32();
- restore_flags(flags);
+ __restore_flags(flags);
}
}
@@ -1404,7 +1391,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1416,7 +1402,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1429,25 +1415,24 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache16_page_indexed(page);
} else
blast_scache16_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
@@ -1458,7 +1443,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1470,7 +1454,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1482,25 +1466,24 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache32_page_indexed(page);
} else
blast_scache32_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
@@ -1511,7 +1494,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1523,7 +1505,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1535,7 +1517,6 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1543,18 +1524,17 @@
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache64_page_indexed(page);
} else
blast_scache64_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
@@ -1565,7 +1545,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1577,7 +1556,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1590,8 +1569,8 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
@@ -1603,13 +1582,11 @@
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache128_page_indexed(page);
} else
blast_scache128_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
@@ -1620,7 +1597,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1632,7 +1608,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1645,7 +1621,6 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1659,13 +1634,11 @@
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache32_page_indexed(page);
} else
blast_scache32_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
@@ -1676,7 +1649,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1688,7 +1660,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1701,7 +1673,6 @@
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1715,13 +1686,11 @@
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache64_page_indexed(page);
} else
blast_scache64_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
@@ -1732,7 +1701,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1744,19 +1712,19 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
ptep = pte_offset(pmdp, page);
- /* If the page isn't marked valid, the page cannot possibly be
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1769,13 +1737,11 @@
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache128_page_indexed(page);
} else
blast_scache128_page(page);
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
@@ -1786,7 +1752,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1798,19 +1763,19 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
ptep = pte_offset(pmdp, page);
- /* If the page isn't marked valid, the page cannot possibly be
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1819,19 +1784,15 @@
*/
if (mm == current->active_mm) {
blast_dcache16_page(page);
- if(text)
- blast_icache16_page(page);
} else {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
}
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
@@ -1842,7 +1803,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1854,7 +1814,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1867,7 +1827,6 @@
if (!(pte_val(*ptep) & _PAGE_PRESENT))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1876,8 +1835,6 @@
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
blast_dcache32_page(page);
- if(text)
- blast_icache32_page(page);
} else {
/*
* Do indexed flush, too much work to get the (possible)
@@ -1885,11 +1842,9 @@
*/
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
}
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
@@ -1900,7 +1855,6 @@
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1912,7 +1866,7 @@
#ifdef DEBUG_CACHE
printk("cpage[%d,%08lx]", (int)mm->context, page);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
pmdp = pmd_offset(pgdp, page);
@@ -1925,7 +1879,6 @@
if (!(pte_val(*ptep) & _PAGE_PRESENT))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1934,8 +1887,6 @@
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
blast_dcache32_page(page);
- if(text)
- blast_icache32_page(page);
} else {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
@@ -1943,13 +1894,9 @@
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache32_page_indexed(page);
blast_dcache32_page_indexed(page ^ dcache_waybit);
- if(text) {
- blast_icache32_page_indexed(page);
- blast_icache32_page_indexed(page ^ icache_waybit);
- }
}
out:
- restore_flags(flags);
+ __restore_flags(flags);
}
/* If the addresses passed to these routines are valid, they are
@@ -1961,120 +1908,71 @@
* flush.
* 3) In KSEG1, no flush necessary.
*/
-static void r4k_flush_page_to_ram_s16d16i16(struct page * page)
+static void r4k_flush_page_to_ram_s16(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache16_page(addr);
- }
+ blast_scache16_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s32d16i16(struct page * page)
+static void r4k_flush_page_to_ram_s32(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache32_page(addr);
- }
+ blast_scache32_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s64d16i16(struct page * page)
+static void r4k_flush_page_to_ram_s64(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache64_page(addr);
- }
+ blast_scache64_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s128d16i16(struct page * page)
+static void r4k_flush_page_to_ram_s128(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache128_page(addr);
- }
+ blast_scache128_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s32d32i32(struct page * page)
+static void r4k_flush_page_to_ram_d16(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache32_page(addr);
- }
+ blast_dcache16_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s64d32i32(struct page * page)
+static void r4k_flush_page_to_ram_d32(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache64_page(addr);
- }
+ blast_dcache32_page((unsigned long)page_address(page));
}
-static void r4k_flush_page_to_ram_s128d32i32(struct page * page)
+static void r4k_flush_page_to_ram_d32_r4600(struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
+ unsigned long flags;
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- blast_scache128_page(addr);
- }
+ __save_and_cli(flags); /* For R4600 v1.7 bug. */
+ blast_dcache32_page((unsigned long)page_address(page));
+ __restore_flags(flags);
}
-static void r4k_flush_page_to_ram_d16i16(struct page * page)
+static void
+r4k_flush_icache_page_s(struct vm_area_struct *vma, struct page *page)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
-
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
- unsigned long flags;
-
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- __save_and_cli(flags);
- blast_dcache16_page(addr);
- __restore_flags(flags);
- }
+ /*
+ * We did an scache flush therefore PI is already clean.
+ */
}
-static void r4k_flush_page_to_ram_d32i32(struct page * page)
+static void
+r4k_flush_icache_range(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;
+ flush_cache_all();
+}
- if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
- unsigned long flags;
+/*
+ * Ok, this seriously sucks. We use them to flush a user page but don't
+ * know the virtual address, so we have to blast away the whole icache
+ * which is significantly more expensive than the real thing.
+ */
+static void
+r4k_flush_icache_page_p(struct vm_area_struct *vma, struct page *page)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
-#ifdef DEBUG_CACHE
- printk("cram[%08lx]", addr);
-#endif
- __save_and_cli(flags);
- blast_dcache32_page(addr);
- __restore_flags(flags);
- }
+ flush_cache_all();
}
/*
@@ -2098,7 +1996,7 @@
flush_cache_all();
} else {
/* Workaround for R4600 bug. See comment above. */
- save_and_cli(flags);
+ __save_and_cli(flags);
*(volatile unsigned long *)KSEG1;
a = addr & ~(dc_lsize - 1);
@@ -2108,9 +2006,9 @@
if (a == end) break;
a += dc_lsize;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
- bcops->bc_wback_inv(addr, size);
+ bc_wback_inv(addr, size);
}
static void
@@ -2142,7 +2040,7 @@
flush_cache_all();
} else {
/* Workaround for R4600 bug. See comment above. */
- save_and_cli(flags);
+ __save_and_cli(flags);
*(volatile unsigned long *)KSEG1;
a = addr & ~(dc_lsize - 1);
@@ -2152,10 +2050,10 @@
if (a == end) break;
a += dc_lsize;
}
- restore_flags(flags);
+ __restore_flags(flags);
}
- bcops->bc_inv(addr, size);
+ bc_inv(addr, size);
}
static void
@@ -2237,7 +2135,7 @@
printk("[tlball]");
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = (get_entryhi() & 0xff);
set_entryhi(KSEG0);
@@ -2257,7 +2155,7 @@
}
BARRIER;
set_entryhi(old_ctx);
- restore_flags(flags);
+ __restore_flags(flags);
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -2268,11 +2166,11 @@
#ifdef DEBUG_TLB
printk("[tlbmm<%d>]", mm->context);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
get_new_mmu_context(mm, asid_cache);
if (mm == current->active_mm)
set_entryhi(mm->context & 0xff);
- restore_flags(flags);
+ __restore_flags(flags);
}
}
@@ -2287,7 +2185,7 @@
printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
start, end);
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if(size <= NTLB_ENTRIES_HALF) {
@@ -2321,7 +2219,7 @@
if (mm == current->active_mm)
set_entryhi(mm->context & 0xff);
}
- restore_flags(flags);
+ __restore_flags(flags);
}
}
@@ -2336,7 +2234,7 @@
#endif
newpid = (vma->vm_mm->context & 0xff);
page &= (PAGE_MASK << 1);
- save_and_cli(flags);
+ __save_and_cli(flags);
oldpid = (get_entryhi() & 0xff);
set_entryhi(page | newpid);
BARRIER;
@@ -2354,15 +2252,10 @@
finish:
BARRIER;
set_entryhi(oldpid);
- restore_flags(flags);
+ __restore_flags(flags);
}
}
-/* Load a new root pointer into the TLB. */
-void load_pgd(unsigned long pg_dir)
-{
-}
-
void pgd_init(unsigned long page)
{
unsigned long *p = (unsigned long *) page;
@@ -2380,12 +2273,6 @@
}
}
-#ifdef DEBUG_TLBUPDATE
-static unsigned long ehi_debug[NTLB_ENTRIES];
-static unsigned long el0_debug[NTLB_ENTRIES];
-static unsigned long el1_debug[NTLB_ENTRIES];
-#endif
-
/* We will need multiple versions of update_mmu_cache(), one that just
* updates the TLB with the new pte(s), and another which also checks
* for the R4k "end of page" hardware bug and does the needy.
@@ -2414,7 +2301,7 @@
}
#endif
- save_and_cli(flags);
+ __save_and_cli(flags);
address &= (PAGE_MASK << 1);
set_entryhi(address | (pid));
pgdp = pgd_offset(vma->vm_mm, address);
@@ -2437,7 +2324,7 @@
BARRIER;
set_entryhi(pid);
BARRIER;
- restore_flags(flags);
+ __restore_flags(flags);
}
#if 0
@@ -2450,7 +2337,7 @@
pte_t *ptep;
int idx;
- save_and_cli(flags);
+ __save_and_cli(flags);
address &= (PAGE_MASK << 1);
set_entryhi(address | (get_entryhi() & 0xff));
pgdp = pgd_offset(vma->vm_mm, address);
@@ -2466,7 +2353,7 @@
else
tlb_write_indexed();
BARRIER;
- restore_flags(flags);
+ __restore_flags(flags);
}
#endif
@@ -2503,7 +2390,7 @@
unsigned long old_pagemask;
unsigned long old_ctx;
- save_and_cli(flags);
+ __save_and_cli(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = (get_entryhi() & 0xff);
old_pagemask = get_pagemask();
@@ -2523,7 +2410,7 @@
BARRIER;
set_pagemask (old_pagemask);
flush_tlb_all();
- restore_flags(flags);
+ __restore_flags(flags);
}
/* Detect and size the various r4k caches. */
@@ -2532,7 +2419,7 @@
icache_size = 1 << (12 + ((config >> 9) & 7));
ic_lsize = 16 << ((config >> 5) & 1);
- printk("Primary instruction cache %dkb, linesize %d bytes)\n",
+ printk("Primary instruction cache %dkb, linesize %d bytes.\n",
icache_size >> 10, ic_lsize);
}
@@ -2541,7 +2428,7 @@
dcache_size = 1 << (12 + ((config >> 6) & 7));
dc_lsize = 16 << ((config >> 4) & 1);
- printk("Primary data cache %dkb, linesize %d bytes)\n",
+ printk("Primary data cache %dkb, linesize %d bytes.\n",
dcache_size >> 10, dc_lsize);
}
@@ -2583,7 +2470,7 @@
/* This is such a bitch, you'd think they would make it
* easy to do this. Away you daemons of stupidity!
*/
- save_and_cli(flags);
+ __save_and_cli(flags);
/* Fill each size-multiple cache line with a valid tag. */
pow2 = (64 * 1024);
@@ -2627,9 +2514,9 @@
break;
pow2 <<= 1;
}
- restore_flags(flags);
+ __restore_flags(flags);
addr -= begin;
- printk("Secondary cache sized at %dK linesize %d\n",
+ printk("Secondary cache sized at %dK linesize %d bytes.\n",
(int) (addr >> 10), sc_lsize);
scache_size = addr;
return 1;
@@ -2647,27 +2534,33 @@
_flush_cache_mm = r4k_flush_cache_mm_d16i16;
_flush_cache_range = r4k_flush_cache_range_d16i16;
_flush_cache_page = r4k_flush_cache_page_d16i16;
- _flush_page_to_ram = r4k_flush_page_to_ram_d16i16;
+ _flush_page_to_ram = r4k_flush_page_to_ram_d16;
break;
case 32:
prid = read_32bit_cp0_register(CP0_PRID) & 0xfff0;
if (prid == 0x2010) { /* R4600 V1.7 */
_clear_page = r4k_clear_page_r4600_v1;
_copy_page = r4k_copy_page_r4600_v1;
+ _flush_page_to_ram = r4k_flush_page_to_ram_d32_r4600;
} else if (prid == 0x2020) { /* R4600 V2.0 */
_clear_page = r4k_clear_page_r4600_v2;
_copy_page = r4k_copy_page_r4600_v2;
+ _flush_page_to_ram = r4k_flush_page_to_ram_d32;
} else {
_clear_page = r4k_clear_page_d32;
_copy_page = r4k_copy_page_d32;
+ _flush_page_to_ram = r4k_flush_page_to_ram_d32;
}
_flush_cache_all = r4k_flush_cache_all_d32i32;
_flush_cache_mm = r4k_flush_cache_mm_d32i32;
_flush_cache_range = r4k_flush_cache_range_d32i32;
_flush_cache_page = r4k_flush_cache_page_d32i32;
- _flush_page_to_ram = r4k_flush_page_to_ram_d32i32;
break;
}
+ ___flush_cache_all = _flush_cache_all;
+
+ _flush_icache_page = r4k_flush_icache_page_p;
+
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_pc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_pc;
@@ -2683,11 +2576,11 @@
_flush_cache_mm = r4k_flush_cache_mm_s16d16i16;
_flush_cache_range = r4k_flush_cache_range_s16d16i16;
_flush_cache_page = r4k_flush_cache_page_s16d16i16;
- _flush_page_to_ram = r4k_flush_page_to_ram_s16d16i16;
break;
case 32:
panic("Invalid cache configuration detected");
};
+ _flush_page_to_ram = r4k_flush_page_to_ram_s16;
_clear_page = r4k_clear_page_s16;
_copy_page = r4k_copy_page_s16;
break;
@@ -2698,16 +2591,15 @@
_flush_cache_mm = r4k_flush_cache_mm_s32d16i16;
_flush_cache_range = r4k_flush_cache_range_s32d16i16;
_flush_cache_page = r4k_flush_cache_page_s32d16i16;
- _flush_page_to_ram = r4k_flush_page_to_ram_s32d16i16;
break;
case 32:
_flush_cache_all = r4k_flush_cache_all_s32d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s32d32i32;
_flush_cache_range = r4k_flush_cache_range_s32d32i32;
_flush_cache_page = r4k_flush_cache_page_s32d32i32;
- _flush_page_to_ram = r4k_flush_page_to_ram_s32d32i32;
break;
};
+ _flush_page_to_ram = r4k_flush_page_to_ram_s32;
_clear_page = r4k_clear_page_s32;
_copy_page = r4k_copy_page_s32;
break;
@@ -2718,16 +2610,15 @@
_flush_cache_mm = r4k_flush_cache_mm_s64d16i16;
_flush_cache_range = r4k_flush_cache_range_s64d16i16;
_flush_cache_page = r4k_flush_cache_page_s64d16i16;
- _flush_page_to_ram = r4k_flush_page_to_ram_s64d16i16;
break;
case 32:
_flush_cache_all = r4k_flush_cache_all_s64d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s64d32i32;
_flush_cache_range = r4k_flush_cache_range_s64d32i32;
_flush_cache_page = r4k_flush_cache_page_s64d32i32;
- _flush_page_to_ram = r4k_flush_page_to_ram_s64d32i32;
break;
};
+ _flush_page_to_ram = r4k_flush_page_to_ram_s64;
_clear_page = r4k_clear_page_s64;
_copy_page = r4k_copy_page_s64;
break;
@@ -2738,20 +2629,21 @@
_flush_cache_mm = r4k_flush_cache_mm_s128d16i16;
_flush_cache_range = r4k_flush_cache_range_s128d16i16;
_flush_cache_page = r4k_flush_cache_page_s128d16i16;
- _flush_page_to_ram = r4k_flush_page_to_ram_s128d16i16;
break;
case 32:
_flush_cache_all = r4k_flush_cache_all_s128d32i32;
_flush_cache_mm = r4k_flush_cache_mm_s128d32i32;
_flush_cache_range = r4k_flush_cache_range_s128d32i32;
_flush_cache_page = r4k_flush_cache_page_s128d32i32;
- _flush_page_to_ram = r4k_flush_page_to_ram_s128d32i32;
break;
};
+ _flush_page_to_ram = r4k_flush_page_to_ram_s128;
_clear_page = r4k_clear_page_s128;
_copy_page = r4k_copy_page_s128;
break;
}
+ ___flush_cache_all = _flush_cache_all;
+ _flush_icache_page = r4k_flush_icache_page_s;
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_sc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_sc;
@@ -2782,13 +2674,17 @@
printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));
- set_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);
+#ifdef CONFIG_MIPS_UNCACHED
+ change_cp0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+#else
+ change_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);
+#endif
probe_icache(config);
probe_dcache(config);
setup_scache(config);
- switch(mips_cputype) {
+ switch(mips_cpu.cputype) {
case CPU_R4600: /* QED style two way caches? */
case CPU_R4700:
case CPU_R5000:
@@ -2797,11 +2693,12 @@
}
_flush_cache_sigtramp = r4k_flush_cache_sigtramp;
+ _flush_icache_range = r4k_flush_icache_range; /* Ouch */
if ((read_32bit_cp0_register(CP0_PRID) & 0xfff0) == 0x2020) {
_flush_cache_sigtramp = r4600v20k_flush_cache_sigtramp;
}
- flush_cache_all();
+ __flush_cache_all();
write_32bit_cp0_register(CP0_WIRED, 0);
/*
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)