patch-2.4.21 linux-2.4.21/include/asm-ppc64/mmu_context.h
- Lines: 76
- Date: 2003-06-13 07:51:38.000000000 -0700
- Orig file: linux-2.4.20/include/asm-ppc64/mmu_context.h
- Orig date: 2002-08-02 17:39:45.000000000 -0700
diff -urN linux-2.4.20/include/asm-ppc64/mmu_context.h linux-2.4.21/include/asm-ppc64/mmu_context.h
@@ -67,11 +67,12 @@
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
         long head, size;
+        unsigned long flags;

-        spin_lock( &mmu_context_queue.lock );
+        spin_lock_irqsave(&mmu_context_queue.lock, flags);

         if ( (size = mmu_context_queue.size) <= 0 ) {
-                spin_unlock( &mmu_context_queue.lock );
+                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                 return -ENOMEM;
         }
@@ -82,7 +83,7 @@
         mmu_context_queue.head = head;
         mmu_context_queue.size = size-1;

-        spin_unlock( &mmu_context_queue.lock );
+        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);

         return 0;
 }
@@ -94,11 +95,12 @@
 destroy_context(struct mm_struct *mm)
 {
         long index, size = mmu_context_queue.size;
+        unsigned long flags;

-        spin_lock( &mmu_context_queue.lock );
+        spin_lock_irqsave(&mmu_context_queue.lock, flags);

         if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) {
-                spin_unlock( &mmu_context_queue.lock );
+                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                 mmu_context_underflow();
         }
@@ -115,9 +117,10 @@
         mmu_context_queue.size = size+1;
         mmu_context_queue.elements[index] = mm->context;

-        spin_unlock( &mmu_context_queue.lock );
+        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }

+extern void flush_stab(void);
 /*
  * switch_mm is the entry point called from the architecture independent
@@ -127,8 +130,6 @@
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
           struct task_struct *tsk, int cpu)
 {
-        tsk->thread.pgdir = next->pgd; /* cache the pgdir in the thread
-                                          maybe not needed any more */
         flush_stab();
 }
@@ -136,13 +137,8 @@
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static inline void
-activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-        current->thread.pgdir = mm->pgd;
-        flush_stab();
-}
-
+#define activate_mm(active_mm, mm) \
+        switch_mm(active_mm, mm, current, smp_processor_id());

 #define VSID_RANDOMIZER 42470972311
 #define VSID_MASK 0xfffffffff
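
The recurring change in this patch is the move from spin_lock()/spin_unlock() to
spin_lock_irqsave()/spin_unlock_irqrestore() around mmu_context_queue. The irqsave
variant records the caller's interrupt state in flags and disables local interrupts
while the lock is held; the irqrestore variant puts that state back exactly, so the
critical section is safe even when the caller already runs with interrupts disabled.
The fragment below is only an illustrative sketch of that locking pattern, not part
of the patch; the example_* names are invented for the illustration.

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Illustration only: a hypothetical counter guarded the same way the
 * patch guards mmu_context_queue (2.4-era spinlock initializer). */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static long example_size = 16;

static int example_alloc(void)
{
        unsigned long flags;

        /* Save the caller's interrupt state and disable local interrupts
         * before taking the lock. */
        spin_lock_irqsave(&example_lock, flags);

        if (example_size <= 0) {
                /* Restore exactly the interrupt state saved above. */
                spin_unlock_irqrestore(&example_lock, flags);
                return -ENOMEM;
        }
        example_size--;

        spin_unlock_irqrestore(&example_lock, flags);
        return 0;
}

Unlike spin_lock_irq()/spin_unlock_irq(), the save/restore pair does not
unconditionally re-enable interrupts on unlock, which is why it is the safe choice
when the calling context is not known.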