patch-2.4.21 linux-2.4.21/arch/x86_64/kernel/process.c
- Lines: 177
- Date: 2003-06-13 07:51:32.000000000 -0700
- Orig file: linux-2.4.20/arch/x86_64/kernel/process.c
- Orig date: 2002-11-28 15:53:12.000000000 -0800
diff -urN linux-2.4.20/arch/x86_64/kernel/process.c linux-2.4.21/arch/x86_64/kernel/process.c
@@ -9,7 +9,7 @@
* X86-64 port
* Andi Kleen.
*
- * $Id: process.c,v 1.57 2002/09/12 12:56:36 ak Exp $
+ * $Id: process.c,v 1.64 2003/03/31 15:11:26 ak Exp $
*/
/*
@@ -242,10 +242,9 @@
* Stop all CPUs and turn off local APICs and the IO-APIC, so
* other OSs see a clean IRQ state.
*/
- if (notify_die(DIE_STOP,"cpustop",0,0) != NOTIFY_BAD)
smp_send_stop();
- disable_IO_APIC();
#endif
+ disable_IO_APIC();
/* Could do reset through the northbridge of the Hammer here. */
/* rebooting needs to touch the page at absolute addr 0 */
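The net effect of this hunk reads more easily outside of diff context: smp_send_stop() is no longer gated on the notify_die() result, and disable_IO_APIC() has moved past the #endif so it also runs on uniprocessor kernels. A condensed sketch of the resulting shutdown path (shutdown_sketch is a placeholder name, and the extern declarations are only here to keep the sketch self-contained):

extern void smp_send_stop(void);
extern void disable_IO_APIC(void);

static void shutdown_sketch(void)
{
#ifdef CONFIG_SMP
	/* park the other CPUs; the notify_die() gate is gone */
	smp_send_stop();
#endif
	/* now outside the #ifdef, so UP kernels also quiesce the IO-APIC
	   and other OSs see a clean IRQ state after a reboot */
	disable_IO_APIC();

	/* ... the actual reset follows, touching the page at absolute addr 0 ... */
}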
@@ -275,6 +274,8 @@
pm_power_off();
}
+extern int printk_address(unsigned long);
+
/* Prints also some state that isn't saved in the pt_regs */
void show_regs(struct pt_regs * regs)
{
@@ -284,8 +285,9 @@
printk("\n");
printk("Pid: %d, comm: %.20s %s\n", current->pid, current->comm, print_tainted());
- printk("RIP: %04lx:[<%016lx>]\n", regs->cs & 0xffff, regs->rip);
- printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
+ printk("RIP: %04lx:", regs->cs & 0xffff);
+ printk_address(regs->rip);
+ printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->rax, regs->rbx, regs->rcx);
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
@@ -316,6 +318,8 @@
fs,fsindex,gs,gsindex,shadowgs);
printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+
+ show_trace(&regs->rsp);
}
/*
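Splitting the RIP line lets printk_address() append the kernel symbol the address falls in, and the new show_trace() call dumps a stack backtrace as well. A rough sketch of the kind of helper show_regs() now leans on; struct ksym and lookup_ksym() are invented names for illustration, while the real printk_address() uses the kernel's own symbol tables:

#include <linux/kernel.h>	/* printk() */

/* hypothetical symbol-lookup interface, for illustration only */
struct ksym { const char *name; unsigned long start; };
extern int lookup_ksym(unsigned long addr, struct ksym *out);

int printk_address_sketch(unsigned long address)
{
	struct ksym sym;

	/* print the raw address, plus "symbol+offset" when the lookup hits */
	if (lookup_ksym(address, &sym))
		return printk("[<%016lx>] %s+0x%lx", address,
			      sym.name, address - sym.start);
	return printk("[<%016lx>]", address);
}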
@@ -336,37 +340,6 @@
}
/*
- * Reloading %gs is a bit complicated because the kernel relies on it
- * This includes the exception handlers, so we cannot take any exceptions
- * while doing this. Check the new gs value manually for validity and only
- * then load it. This needs locking again parallel CPUs that share the same
- * LDT. This has to be done in the context switch iff %gs changes.
- */
-void load_gs_index(unsigned gs)
-{
- struct mm_struct *mm = current->mm;
- int access;
- /* paranoia: */
- if ((gs & 3) != 2) gs = 0;
- if (mm)
- read_lock(&mm->context.ldtlock);
- asm volatile("pushf\n\t"
- "cli\n\t"
- "swapgs\n\t"
- /* cannot take any exception until the next swapgs */
- "lar %1,%0\n\t"
- "jnz 1f\n\t"
- "movl %1,%%eax\n\t"
- "movl %%eax,%%gs\n\t"
- "jmp 2f\n\t"
- "1: movl %2,%%gs\n\t"
- "2: swapgs\n\t"
- "popf" : "=g" (access) : "g" (gs), "r" (0) : "rax");
- if (mm)
- read_unlock(&mm->context.ldtlock);
-}
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(void)
@@ -486,10 +459,6 @@
/*
* switch_to(x,y) should switch tasks from x to y.
*
- * We fsave/fwait so that an exception goes off at the right time
- * (as a call from the fsave or fwait in effect) rather than to
- * the wrong process.
- *
* This could still be optimized:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
@@ -520,29 +489,37 @@
/*
* Switch FS and GS.
- * XXX Check if this is safe on SMP (!= -> |)
*/
{
- unsigned int fsindex;
-
+ unsigned fsindex;
asm volatile("movl %%fs,%0" : "=g" (fsindex));
- if (unlikely(fsindex != next->fsindex)) /* or likely? */
+ /* A segment register != 0 always requires a reload.
+ Also reload when it has changed.
+ When the prev process used a 64bit base, always reload
+ to avoid an information leak. */
+ if (unlikely((fsindex | next->fsindex) || prev->fs)) {
loadsegment(fs, next->fsindex);
- if (unlikely(fsindex != prev->fsindex))
+ /* check if the user uses a selector != 0;
+ * if yes, clear the 64bit base, since an overloaded base
+ * is always mapped to the null selector
+ */
+ if (fsindex)
prev->fs = 0;
- if ((fsindex != prev->fsindex) || (prev->fs != next->fs))
+ }
+ /* when next process has a 64bit base use it */
+ if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
}
{
- unsigned int gsindex;
-
+ unsigned gsindex;
asm volatile("movl %%gs,%0" : "=g" (gsindex));
- if (unlikely(gsindex != next->gsindex))
- load_gs_index(next->gs);
- if (unlikely(gsindex != prev->gsindex))
+ if (unlikely((gsindex | next->gsindex) || prev->gs)) {
+ load_gs_index(next->gsindex);
+ if (gsindex)
prev->gs = 0;
- if (gsindex != prev->gsindex || prev->gs != next->gs)
+ }
+ if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
}
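The reload condition is the core of this hunk: %fs (and, symmetrically, %gs) is reloaded whenever the outgoing or incoming selector is non-zero, or when the outgoing task had a 64-bit base installed, so a stale base can never leak into the next task. A self-contained restatement of the %fs half with simplified types; struct thread, reload_fs() and write_fs_base() stand in for struct thread_struct, loadsegment() and wrmsrl(MSR_FS_BASE, ...):

struct thread {
	unsigned fsindex;	/* %fs selector saved at the last switch-out */
	unsigned long fs;	/* 64-bit base set via MSR, 0 if unused */
};

void reload_fs(unsigned sel);		/* stand-in for loadsegment(fs, sel) */
void write_fs_base(unsigned long base);	/* stand-in for wrmsrl(MSR_FS_BASE, ...) */

static void switch_fs_sketch(unsigned cur_fsindex,
			     struct thread *prev, struct thread *next)
{
	/*
	 * Reload when either side uses a non-zero selector, or when the
	 * previous task had a 64-bit base installed: skipping the reload
	 * in that case would leak prev's base into next.
	 */
	if ((cur_fsindex | next->fsindex) || prev->fs) {
		reload_fs(next->fsindex);
		/* a non-zero selector overrides any saved 64-bit base */
		if (cur_fsindex)
			prev->fs = 0;
	}
	/* only tasks that actually set a 64-bit base pay for the MSR write */
	if (next->fs)
		write_fs_base(next->fs);
	prev->fsindex = cur_fsindex;
}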
@@ -695,16 +672,18 @@
case ARCH_SET_GS:
if (addr >= TASK_SIZE)
return -EPERM;
- asm volatile("movw %%gs,%0" : "=g" (current->thread.gsindex));
+ asm volatile("movl %0,%%gs" :: "r" (0));
+ current->thread.gsindex = 0;
current->thread.gs = addr;
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
break;
case ARCH_SET_FS:
/* Not strictly needed for fs, but do it for symmetry
- with gs */
+ with gs. */
if (addr >= TASK_SIZE)
return -EPERM;
- asm volatile("movw %%fs,%0" : "=g" (current->thread.fsindex));
+ asm volatile("movl %0,%%fs" :: "r" (0));
+ current->thread.fsindex = 0;
current->thread.fs = addr;
ret = checking_wrmsrl(MSR_FS_BASE, addr);
break;
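Seen from user space, the ARCH_SET_FS/ARCH_SET_GS change means the selector is forced to 0 before the base is written, so the 64-bit base supplied here is what later %gs-relative accesses use. A minimal usage sketch, assuming an x86-64 glibc environment that exposes SYS_arch_prctl; the ARCH_* constants are the values from asm/prctl.h:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef ARCH_SET_GS
#define ARCH_SET_GS 0x1001	/* values as in asm/prctl.h */
#define ARCH_GET_GS 0x1004
#endif

static long per_thread_block[64];	/* arbitrary block to point %gs at */

int main(void)
{
	unsigned long base = 0;

	/* ask the kernel to install MSR_KERNEL_GS_BASE for this thread */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS,
		    (unsigned long)per_thread_block) != 0) {
		perror("arch_prctl(ARCH_SET_GS)");
		return 1;
	}

	/* read it back to confirm the base stuck */
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) == 0)
		printf("gs base = %#lx\n", base);
	return 0;
}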
@@ -726,4 +705,3 @@
}
return ret;
}
-