patch-2.4.2 linux/include/asm-s390/pgalloc.h
- Lines: 461
- Date: Tue Feb 13 14:13:44 2001
- Orig file: v2.4.1/linux/include/asm-s390/pgalloc.h
- Orig date: Fri May 12 11:41:44 2000
diff -u --recursive --new-file v2.4.1/linux/include/asm-s390/pgalloc.h linux/include/asm-s390/pgalloc.h
@@ -31,7 +31,7 @@
extern __inline__ pgd_t* get_pgd_slow(void)
{
int i;
- pgd_t *pgd,*ret = (pgd_t *)__get_free_pages(GFP_KERNEL,2);
+ pgd_t *pgd,*ret = (pgd_t *)__get_free_pages(GFP_KERNEL,1);
if (ret)
for (i=0,pgd=ret;i<USER_PTRS_PER_PGD;i++,pgd++)
pmd_clear(pmd_offset(pgd,i*PGDIR_SIZE));
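In this hunk the pgd allocation shrinks from order 2 to order 1: __get_free_pages(gfp, order) returns 2^order physically contiguous pages, i.e. four pages before the patch and two after. A minimal sketch of the order arithmetic, assuming the 4KB s390 page size:

    /* 2^order pages per allocation; s390 pages are 4KB (PAGE_SHIFT = 12) */
    unsigned long order1_bytes = (1UL << 1) << 12;  /* 2 pages =  8192 bytes */
    unsigned long order2_bytes = (1UL << 2) << 12;  /* 4 pages = 16384 bytes */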
@@ -40,47 +40,80 @@
extern __inline__ pgd_t* get_pgd_fast(void)
{
- unsigned long *ret;
+ unsigned long *ret = pgd_quicklist;
- if((ret = pgd_quicklist) != NULL) {
+ if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
- pgtable_cache_size--;
- /*
- * Need to flush the tlb, since private page tables
- * are unique through the address of the pgd and the virtual
- * address. If we reuse a pgd we need to be sure no tlb entry
- * with that pgd is left -> global flush
- *
- * Fixme: To avoid this global flush we should
- * use pgd_quicklist as a fixed-length fifo list
- * and not as a stack
- */
- } else
- ret = (unsigned long *)get_pgd_slow();
+ pgtable_cache_size -= 2;
+ }
return (pgd_t *)ret;
}
+extern __inline__ pgd_t *pgd_alloc(void)
+{
+ pgd_t *pgd;
+
+ pgd = get_pgd_fast();
+ if (!pgd)
+ pgd = get_pgd_slow();
+ return pgd;
+}
+
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
+ pgtable_cache_size += 2;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
- free_pages((unsigned long)pgd,2);
+ free_pages((unsigned long) pgd, 1);
+}
+
+#define pgd_free(pgd) free_pgd_fast(pgd)
+
+/*
+ * page middle directory allocation/free routines.
+ * We don't use pmd cache, so these are dummy routines.
+ */
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+}
+
+extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+}
+
+extern inline void pmd_free(pmd_t * pmd)
+{
}
+#define pmd_free_kernel pmd_free
+#define pmd_alloc_kernel pmd_alloc
+
+/*
+ * page table entry allocation/free routines.
+ */
+extern pte_t empty_bad_pte_table[];
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ pte_t* get_pte_fast(void)
{
- unsigned long *ret;
+ unsigned long *ret = (unsigned long *) pte_quicklist;
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
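get_pgd_fast() above and get_pte_fast() here pop from the same kind of quicklist: a freed table is pushed by storing the current list head in its own first word (see free_pgd_fast() above and free_pte_fast() in the next hunk), and a pop unlinks the head and then repairs the clobbered first word by copying the adjacent entry over it (ret[0] = ret[1]; in a free table both slots hold invalid entries). A self-contained sketch of the pattern, with a hypothetical standalone list head standing in for pgd_quicklist/pte_quicklist:

    static unsigned long *quicklist;    /* hypothetical stand-in for pgd_quicklist */

    static void ql_push(unsigned long *table)
    {
            table[0] = (unsigned long) quicklist;   /* link through word 0 */
            quicklist = table;
    }

    static unsigned long *ql_pop(void)
    {
            unsigned long *ret = quicklist;
            if (ret != NULL) {
                    quicklist = (unsigned long *) ret[0];   /* unlink head */
                    ret[0] = ret[1];    /* restore word 0 from a clean neighbour */
            }
            return ret;
    }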
@@ -90,6 +123,8 @@
extern __inline__ void free_pte_fast(pte_t *pte)
{
+ if (pte == empty_bad_pte_table)
+ return;
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
@@ -97,79 +132,41 @@
extern __inline__ void free_pte_slow(pte_t *pte)
{
- free_page((unsigned long)pte);
-}
-
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
-
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t * page = (pte_t *) get_pte_fast();
-
- if (!page)
- return get_pte_kernel_slow(pmd, address);
- pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(page);
- pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(page+1024);
- pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(page+2048);
- pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(page+3072);
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
+ free_page((unsigned long) pte);
}
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long vmaddr)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ unsigned long offset;
- if (pmd_none(*pmd))
- goto getnew;
+ offset = (vmaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ unsigned long page = (unsigned long) get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, offset);
+ pmd_val(pmd[0]) = _PAGE_TABLE + __pa(page);
+ pmd_val(pmd[1]) = _PAGE_TABLE + __pa(page+1024);
+ pmd_val(pmd[2]) = _PAGE_TABLE + __pa(page+2048);
+ pmd_val(pmd[3]) = _PAGE_TABLE + __pa(page+3072);
+ return (pte_t *) page + offset;
+ }
if (pmd_bad(*pmd))
- goto fix;
- return (pte_t *) pmd_page(*pmd) + address;
-getnew:
-{
- unsigned long page = (unsigned long) get_pte_fast();
-
- if (!page)
- return get_pte_slow(pmd, address);
- pmd_val(pmd[0]) = _PAGE_TABLE + __pa(page);
- pmd_val(pmd[1]) = _PAGE_TABLE + __pa(page+1024);
- pmd_val(pmd[2]) = _PAGE_TABLE + __pa(page+2048);
- pmd_val(pmd[3]) = _PAGE_TABLE + __pa(page+3072);
- return (pte_t *) page + address;
-}
-fix:
- __handle_bad_pmd(pmd);
- return NULL;
+ BUG();
+ return (pte_t *) pmd_page(*pmd) + offset;
}
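The rewritten pte_alloc() makes the segment emulation explicit: a 31-bit S/390 hardware segment maps 1MB, so the pte table for one segment holds 1MB/4KB = 256 entries of 4 bytes each, i.e. 1KB, and a single 4KB page therefore carries four pte tables. That is why one allocation fills pmd[0..3] with page, page+1024, page+2048 and page+3072, emulating a 4MB "pmd". The arithmetic as a sketch (macro names are illustrative, not from the kernel headers):

    #define SEG_SIZE        (1UL << 20)             /* 1MB per hardware segment */
    #define PG_SIZE         (1UL << 12)             /* 4KB pages                */
    #define PTES_PER_SEG    (SEG_SIZE / PG_SIZE)    /* 256 entries              */
    #define PTE_TABLE_SIZE  (PTES_PER_SEG * 4)      /* 256 * 4 = 1024 bytes     */
    /* PG_SIZE / PTE_TABLE_SIZE == 4 pte tables per page, hence pmd[0..3]
       at byte offsets 0, 1024, 2048 and 3072 into the page                */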
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free(pmd_t * pmd)
-{
-}
-
-extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
-{
- return (pmd_t *) pgd;
-}
-
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
+#define pte_alloc_kernel(pmd, addr) pte_alloc(pmd, addr)
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
extern int do_check_pgt_cache(int, int);
+/*
+ * This establishes kernel virtual mappings (e.g., as a result of a
+ * vmalloc call). Since s390-esame uses a separate kernel page table,
+ * there is nothing to do here... :)
+ */
#define set_pgdir(addr,entry) do { } while(0)
/*
@@ -185,161 +182,122 @@
*/
/*
- * s390 has two ways of flushing TLBs
+ * S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
- * 'ipte' invalidates a pte in a page table and flushes that out of
- * the TLBs of all PUs of a SMP
+ * 'csp' flushes the TLBs on all PUs of an SMP system
+ * 'ipte' invalidates a pte in a page table and flushes it out of
+ * the TLBs of all PUs of an SMP system
*/
-#define __flush_tlb() \
+#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
-static inline void __flush_global_tlb(void)
-{
- int cs1=0,dum=0;
- int *adr;
- long long dummy=0;
- adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
- __asm__ __volatile__("lr 2,%0\n\t"
- "lr 3,%1\n\t"
- "lr 4,%2\n\t"
- ".long 0xb2500024" :
- : "d" (cs1), "d" (dum), "d" (adr)
- : "2", "3", "4");
-}
-
-#if 0
-#define flush_tlb_one(a,b) __flush_tlb()
-#define __flush_tlb_one(a,b) __flush_tlb()
-#else
-static inline void __flush_tlb_one(struct mm_struct *mm,
- unsigned long addr)
-{
- pgd_t * pgdir;
- pmd_t * pmd;
- pte_t * pte, *pto;
-
- pgdir = pgd_offset(mm, addr);
- if (pgd_none(*pgdir) || pgd_bad(*pgdir))
- return;
- pmd = pmd_offset(pgdir, addr);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return;
- pte = pte_offset(pmd,addr);
-
- /*
- * S390 has 1mb segments, we are emulating 4MB segments
- */
-
- pto = (pte_t*) (((unsigned long) pte) & 0x7ffffc00);
-
- __asm__ __volatile(" ic 0,2(%0)\n"
- " ipte %1,%2\n"
- " stc 0,2(%0)"
- : : "a" (pte), "a" (pto), "a" (addr): "0");
-}
-#endif
-
-
#ifndef CONFIG_SMP
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb()
-#define local_flush_tlb() __flush_tlb()
-
/*
* We always need to flush, since s390 does not flush tlb
* on each context switch
*/
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- __flush_tlb_one(vma->vm_mm,addr);
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- __flush_tlb();
-}
+#define flush_tlb() local_flush_tlb()
+#define flush_tlb_all() local_flush_tlb()
+#define flush_tlb_mm(mm) local_flush_tlb()
+#define flush_tlb_page(vma, va) local_flush_tlb()
+#define flush_tlb_range(mm, start, end) local_flush_tlb()
#else
-/*
- * We aren't very clever about this yet - SMP could certainly
- * avoid some global flushes..
- */
-
#include <asm/smp.h>
-#define local_flush_tlb() \
- __flush_tlb()
+extern void smp_ptlb_all(void);
+static inline void global_flush_tlb_csp(void)
+{
+ int cs1=0,dum=0;
+ int *adr;
+ long long dummy=0;
+ adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
+ __asm__ __volatile__("lr 2,%0\n\t"
+ "lr 3,%1\n\t"
+ "lr 4,%2\n\t"
+ "csp 2,4" :
+ : "d" (cs1), "d" (dum), "d" (adr)
+ : "2", "3", "4");
+}
+static inline void global_flush_tlb(void)
+{
+ if (MACHINE_HAS_CSP)
+ global_flush_tlb_csp();
+ else
+ smp_ptlb_all();
+}
/*
- * We only have to do global flush of tlb if process run since last
- * flush on any other pu than current.
- * If we have threads (mm->count > 1) we always do a global flush,
- * since the process runs on more than one processor at the same time.
+ * We only have to do a global flush of the tlb if the process
+ * has run on any pu other than the current one since the last flush.
+ * If we have threads (mm->mm_count > 1) we always do a global flush,
+ * since the process runs on more than one processor at the same time.
*/
-static inline void flush_tlb_current_task(void)
+static inline void __flush_tlb_mm(struct mm_struct * mm)
{
- if ((atomic_read(&current->mm->mm_count) != 1) ||
- (current->mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
- current->mm->cpu_vm_mask = (1UL << smp_processor_id());
- __flush_global_tlb();
+ if ((smp_num_cpus > 1) &&
+ ((atomic_read(&mm->mm_count) != 1) ||
+ (mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
+ mm->cpu_vm_mask = (1UL << smp_processor_id());
+ global_flush_tlb();
} else {
local_flush_tlb();
}
}
-#define flush_tlb() flush_tlb_current_task()
+#define flush_tlb() __flush_tlb_mm(current->mm)
+#define flush_tlb_all() global_flush_tlb()
+#define flush_tlb_mm(mm) __flush_tlb_mm(mm)
+#define flush_tlb_page(vma, va) __flush_tlb_mm((vma)->vm_mm)
+#define flush_tlb_range(mm, start, end) __flush_tlb_mm(mm)
-#define flush_tlb_all() __flush_global_tlb()
+#endif
-static inline void flush_tlb_mm(struct mm_struct * mm)
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
- if ((atomic_read(&mm->mm_count) != 1) ||
- (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
- mm->cpu_vm_mask = (1UL << smp_processor_id());
- __flush_global_tlb();
- } else {
- local_flush_tlb();
- }
+ /* S/390 does not keep any page table caches in TLB */
}
-static inline void flush_tlb_page(struct vm_area_struct * vma,
- unsigned long va)
+
+static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
- __flush_tlb_one(vma->vm_mm,va);
+ /* No need to flush TLB; bits are in storage key */
+ return ptep_test_and_clear_young(ptep);
}
-static inline void flush_tlb_range(struct mm_struct * mm,
- unsigned long start, unsigned long end)
+static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
- if ((atomic_read(&mm->mm_count) != 1) ||
- (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
- mm->cpu_vm_mask = (1UL << smp_processor_id());
- __flush_global_tlb();
- } else {
- local_flush_tlb();
- }
+ /* No need to flush TLB; bits are in storage key */
+ return ptep_test_and_clear_dirty(ptep);
}
-#endif
+static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!(pte_val(pte) & _PAGE_INVALID)) {
+ /* S390 has 1mb segments, we are emulating 4MB segments */
+ pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
+ __asm__ __volatile__ ("ipte %0,%1" : : "a" (pto), "a" (address));
+ }
+ pte_clear(ptep);
+ return pte;
+}
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void ptep_establish(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, pte_t entry)
{
- /* S/390 does not keep any page table caches in TLB */
+ ptep_invalidate(vma, address, ptep);
+ set_pte(ptep, entry);
}
#endif /* _S390_PGALLOC_H */
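For reference, the SMP flush policy added above reduces to one decision: flush only the local TLB when the mm is private to this CPU, otherwise flush everywhere, using 'csp' when the machine supports it and falling back to running 'ptlb' on every CPU via smp_ptlb_all(). A pseudo-C restatement of __flush_tlb_mm() with the two tests commented:

    if (smp_num_cpus > 1 &&
        (atomic_read(&mm->mm_count) != 1 ||                 /* shared by threads */
         mm->cpu_vm_mask != (1UL << smp_processor_id()))) { /* ran on another pu */
            mm->cpu_vm_mask = 1UL << smp_processor_id();    /* mm now only here  */
            global_flush_tlb();     /* 'csp' if MACHINE_HAS_CSP, else            */
                                    /* smp_ptlb_all(): 'ptlb' on every CPU       */
    } else {
            local_flush_tlb();      /* single 'ptlb', current processor only     */
    }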