The pgalloc.h changes broke ppc64:

In file included from include/asm-generic/tlb.h:18,
                 from include/asm/tlb.h:24,
                 from arch/ppc64/mm/hash_utils.c:48:
include/asm/pgalloc.h: In function `__pte_free_tlb':
include/asm/pgalloc.h:110: dereferencing pointer to incomplete type
include/asm/pgalloc.h:111: dereferencing pointer to incomplete type

__pte_free_tlb() dereferences its struct mmu_gather argument, but pgalloc.h is pulled in by asm-generic/tlb.h before struct mmu_gather has been defined, so the type is still incomplete at that point.  Uninlining __pte_free_tlb() and moving its body into arch/ppc64/mm/tlb.c, where the full definition is visible, fixes that.
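
For illustration only, a minimal sketch of the failure mode, using
hypothetical names rather than the real kernel types: a header may declare
and pass around a pointer to an incomplete type, but only a translation
unit that sees the full definition may dereference it.

	/* header.h: only a forward declaration is visible here */
	struct gather;			/* hypothetical, incomplete type */

	/* An inline body here could not compile:
	 *	static inline void use(struct gather *g) { g->count++; }
	 * would fail with "dereferencing pointer to incomplete type".
	 * A bare prototype needs no layout information, so it is fine:
	 */
	void use(struct gather *g);

	/* impl.c: the full definition is in scope, so the body compiles */
	struct gather {
		int count;		/* hypothetical definition */
	};

	void use(struct gather *g)
	{
		g->count++;		/* legal: layout is known here */
	}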


---

 25-akpm/arch/ppc64/mm/tlb.c         |   27 +++++++++++++++++++++++++++
 25-akpm/include/asm-ppc64/pgalloc.h |   28 +---------------------------
 2 files changed, 28 insertions(+), 27 deletions(-)

diff -puN arch/ppc64/mm/tlb.c~ppc64-uninline-__pte_free_tlb arch/ppc64/mm/tlb.c
--- 25/arch/ppc64/mm/tlb.c~ppc64-uninline-__pte_free_tlb	2004-05-14 22:23:49.056917432 -0700
+++ 25-akpm/arch/ppc64/mm/tlb.c	2004-05-14 22:23:49.071915152 -0700
@@ -41,6 +41,33 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_ga
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 unsigned long pte_freelist_forced_free;
 
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+{
+	/* This is safe as we are holding page_table_lock */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pte_free(ptepage);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pte_free_now(ptepage);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
 /*
  * Update the MMU hash table to correspond with a change to
  * a Linux PTE.  If wrprot is true, it is permissible to
diff -puN include/asm-ppc64/pgalloc.h~ppc64-uninline-__pte_free_tlb include/asm-ppc64/pgalloc.h
--- 25/include/asm-ppc64/pgalloc.h~ppc64-uninline-__pte_free_tlb	2004-05-14 22:23:49.062916520 -0700
+++ 25-akpm/include/asm-ppc64/pgalloc.h	2004-05-14 22:23:49.072915000 -0700
@@ -101,33 +101,7 @@ extern void pte_free_submit(struct pte_f
 
 DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
-{
-	/* This is safe as we are holding page_table_lock */
-        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pte_free(ptepage);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pte_free_now(ptepage);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->pages[(*batchp)->index++] = ptepage;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
-
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
 #define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
 
 #define check_pgt_cache()	do { } while (0)

_