patch-2.4.22 linux-2.4.22/arch/ppc64/kernel/stab.c
- Lines: 157
- Date: 2003-08-25 04:44:40.000000000 -0700
- Orig file: linux-2.4.21/arch/ppc64/kernel/stab.c
- Orig date: 2002-11-28 15:53:11.000000000 -0800
diff -urN linux-2.4.21/arch/ppc64/kernel/stab.c linux-2.4.22/arch/ppc64/kernel/stab.c
@@ -17,6 +17,7 @@
 #include <asm/paca.h>
 #include <asm/naca.h>
 #include <asm/pmc.h>
+#include <asm/cputable.h>
 
 inline int make_ste(unsigned long stab,
 		    unsigned long esid, unsigned long vsid);
@@ -35,10 +36,7 @@
 	esid = GET_ESID(KERNELBASE);
 	vsid = get_kernel_vsid(esid << SID_SHIFT);
 
-	if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
-		__asm__ __volatile__("isync; slbia; isync":::"memory");
-		make_ste(stab, esid, vsid);
-	} else {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/* Invalidate the entire SLB & all the ERATS */
 		__asm__ __volatile__("isync" : : : "memory");
 #ifndef CONFIG_PPC_ISERIES
@@ -49,7 +47,10 @@
 #else
 		__asm__ __volatile__("isync; slbia; isync":::"memory");
 #endif
-	}
+	} else {
+		__asm__ __volatile__("isync; slbia; isync":::"memory");
+		make_ste(stab, esid, vsid);
+	}
 }
 
 /*
@@ -61,6 +62,15 @@
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	STE *ste, *castout_ste;
+	unsigned char kp = 1;
+
+#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
+	if(((esid >> SMALLOC_ESID_SHIFT) ==
+	    (SMALLOC_START >> SMALLOC_EA_SHIFT)) &&
+	   (current->thread.flags & PPC_FLAG_SHARED)) {
+		kp = 0;
+	}
+#endif
 
 	/* Search the primary group first. */
 	global_entry = (esid & 0x1f) << 3;
@@ -77,7 +87,7 @@
 				__asm__ __volatile__ ("eieio" : : : "memory");
 				ste->dw0.dw0.esid = esid;
 				ste->dw0.dw0.v = 1;
-				ste->dw0.dw0.kp = 1;
+				ste->dw0.dw0.kp = kp;
 
 				/* Order update */
 				__asm__ __volatile__ ("sync" : : : "memory");
@@ -135,7 +145,7 @@
 	old_esid = castout_ste->dw0.dw0.esid;
 	castout_ste->dw0.dw0.esid = esid;
 	castout_ste->dw0.dw0.v = 1;
-	castout_ste->dw0.dw0.kp = 1;
+	castout_ste->dw0.dw0.kp = kp;
 	__asm__ __volatile__ ("slbie %0" : : "r" (old_esid << SID_SHIFT));
 	/* Ensure completion of slbie */
 	__asm__ __volatile__ ("sync" : : : "memory" );
@@ -158,6 +168,15 @@
 		unsigned long word0;
 		slb_dword1 data;
 	} vsid_data;
+	unsigned char kp = 1;
+
+#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
+	if(((esid >> SMALLOC_ESID_SHIFT) ==
+	    (SMALLOC_START >> SMALLOC_EA_SHIFT)) &&
+	   (current->thread.flags & PPC_FLAG_SHARED)) {
+		kp = 0;
+	}
+#endif
 
 	/*
 	 * Find an empty entry, if one exists.
@@ -171,7 +190,7 @@
 			 */
 			vsid_data.word0 = 0;
 			vsid_data.data.vsid = vsid;
-			vsid_data.data.kp = 1;
+			vsid_data.data.kp = kp;
 			if (large)
 				vsid_data.data.l = 1;
 
@@ -220,7 +239,7 @@
 	 */
 	vsid_data.word0 = 0;
 	vsid_data.data.vsid = vsid;
-	vsid_data.data.kp = 1;
+	vsid_data.data.kp = kp;
 	if (large)
 		vsid_data.data.l = 1;
 
@@ -264,6 +283,15 @@
 		}
 	}
 
+#ifdef CONFIG_SHARED_MEMORY_ADDRESSING
+	/* Shared segments might be mapped into a user task space,
+	 * so we need to add them to the list of entries to flush
+	 */
+	if ((ea >> SMALLOC_EA_SHIFT) == (SMALLOC_START >> SMALLOC_EA_SHIFT)) {
+		kernel_segment = 0;
+	}
+#endif
+
 	esid = GET_ESID(ea);
 	if (trap == 0x380 || trap == 0x480) {
 #ifndef CONFIG_PPC_ISERIES
@@ -305,7 +333,15 @@
 	unsigned char *segments = get_paca()->xSegments;
 	unsigned long flags, i;
 
-	if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+		unsigned long flags;
+
+		PMC_SW_PROCESSOR(stab_invalidations);
+
+		__save_and_cli(flags);
+		__asm__ __volatile__("isync; slbia; isync":::"memory");
+		__restore_flags(flags);
+	} else {
 		unsigned long entry;
 		STE *ste;
 
@@ -330,7 +366,8 @@
 		     entry++, ste++) {
 			unsigned long ea;
 			ea = ste->dw0.dw0.esid << SID_SHIFT;
-			if (STAB_PRESSURE || (!REGION_ID(ea))) {
+			if (STAB_PRESSURE || (!REGION_ID(ea)) ||
+			    (REGION_ID(ea) == VMALLOC_REGION_ID)) {
 				ste->dw0.dw0.v = 0;
 				PMC_SW_PROCESSOR(stab_invalidations);
 			}
@@ -347,13 +384,5 @@
 		__asm__ __volatile__ ("slbia" : : : "memory");
 		/* Force flush to complete. */
 		__asm__ __volatile__ ("sync" : : : "memory");
-	} else {
-		unsigned long flags;
-
-		PMC_SW_PROCESSOR(stab_invalidations);
-
-		__save_and_cli(flags);
-		__asm__ __volatile__("isync; slbia; isync":::"memory");
-		__restore_flags(flags);
 	}
 }
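
Note on the change: the heart of this patch is replacing explicit processor-version checks (__is_processor(PV_POWER4) / PV_POWER4p) with a test of the CPU_FTR_SLB bit in cur_cpu_spec->cpu_features, so any CPU that advertises an SLB takes the slbia path while everything else falls back to segment-table (STE) management. The stand-alone C program below only models that dispatch; the struct cpu_spec layout, the feature value, and the two stub handlers are simplified stand-ins invented for illustration, not the kernel's definitions.

/* Illustrative user-space model of the feature-bit dispatch introduced
 * by this patch.  CPU_FTR_SLB and struct cpu_spec mirror the kernel's
 * names, but everything here is a self-contained sketch, not kernel code. */
#include <stdio.h>

#define CPU_FTR_SLB 0x0000000000000100ULL	/* value chosen for illustration only */

struct cpu_spec {
	const char        *name;
	unsigned long long cpu_features;
};

/* Stub for the SLB path: invalidate the whole SLB ("isync; slbia; isync" on real hardware). */
static void flush_slb(void)
{
	printf("SLB-capable CPU: isync; slbia; isync\n");
}

/* Stub for the segment-table path: invalidate user/vmalloc STEs, then slbia/sync. */
static void flush_stab_entries(void)
{
	printf("segment-table CPU: invalidate STAB entries, then slbia; sync\n");
}

static void flush_stab(const struct cpu_spec *cur_cpu_spec)
{
	/* Same shape as the patched kernel code: a single feature test
	 * instead of a list of processor-version comparisons. */
	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
		flush_slb();
	else
		flush_stab_entries();
}

int main(void)
{
	struct cpu_spec power4 = { "POWER4",  CPU_FTR_SLB };
	struct cpu_spec rs64   = { "RS64-IV", 0 };

	flush_stab(&power4);
	flush_stab(&rs64);
	return 0;
}

The advantage of the feature-bit form, and presumably why the patch adopts it, is that new SLB-capable processors only need to set CPU_FTR_SLB in their cputable entry rather than being added to every version comparison scattered through stab.c.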