Seeing as we've just churned all of the mm code, it's a good time for
some coding-style cleanups.
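
For example, over-long declarations are wrapped to fit in 80 columns,
and the stray space between '*' and the identifier in pointer
declarations is dropped.  One declaration, taken from the hunks below:

	extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

becomes

	extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
			void *buf, int len, int write);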

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/include/linux/mm.h    |   38 +++++++----
 25-akpm/include/linux/sched.h |   29 ++++----
 25-akpm/mm/memory.c           |  142 ++++++++++++++++++++++--------------------
 25-akpm/mm/msync.c            |   24 +++----
 4 files changed, 126 insertions(+), 107 deletions(-)

diff -puN include/linux/mm.h~4level-core-tweaks include/linux/mm.h
--- 25/include/linux/mm.h~4level-core-tweaks	2004-11-18 23:43:58.373282176 -0800
+++ 25-akpm/include/linux/mm.h	2004-11-18 23:43:58.382280808 -0800
@@ -566,7 +566,8 @@ int unmap_vmas(struct mmu_gather **tlbp,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
-void clear_page_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end);
+void clear_page_range(struct mmu_gather *tlb, unsigned long addr,
+		unsigned long end);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
@@ -581,19 +582,28 @@ static inline void unmap_shared_mapping_
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pgd_t fastcall *__pgd_alloc(struct mm_struct *mm, pml4_t *pgd, unsigned long address);
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
-extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
+extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd,
+		unsigned long address));
+extern pgd_t fastcall *__pgd_alloc(struct mm_struct *mm, pml4_t *pgd,
+		unsigned long address);
+extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long address));
+extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long addr, struct page *page, pgprot_t prot);
+extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long addr, unsigned long pgoff, pgprot_t prot);
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, int write_access);
 extern int make_pages_present(unsigned long addr, unsigned long end);
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, int write);
 void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas);
 
 int __set_page_dirty_buffers(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
@@ -638,14 +648,16 @@ static inline pmd_t *pmd_alloc(struct mm
 	return pmd_offset(pgd, address);
 }
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm, pml4_t *pml4, unsigned long address)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm, pml4_t *pml4,
+				unsigned long address)
 {
 	if (pml4_none(*pml4))
 		return __pgd_alloc(mm, pml4, address);
 	return pml4_pgd_offset(pml4, address);
 }
 
-static inline pgd_t *pgd_alloc_k(struct mm_struct *mm, pml4_t *pml4, unsigned long address)
+static inline pgd_t *pgd_alloc_k(struct mm_struct *mm, pml4_t *pml4,
+				unsigned long address)
 {
 	if (pml4_none(*pml4))
 		return __pgd_alloc(mm, pml4, address);
diff -puN include/linux/sched.h~4level-core-tweaks include/linux/sched.h
--- 25/include/linux/sched.h~4level-core-tweaks	2004-11-18 23:43:58.375281872 -0800
+++ 25-akpm/include/linux/sched.h	2004-11-18 23:43:58.384280504 -0800
@@ -202,26 +202,27 @@ extern void arch_unmap_area_topdown(stru
 
 
 struct mm_struct {
-	struct vm_area_struct * mmap;		/* list of VMAs */
+	struct vm_area_struct *mmap;	/* list of VMAs */
 	struct rb_root mm_rb;
-	struct vm_area_struct * mmap_cache;	/* last find_vma result */
+	struct vm_area_struct *mmap_cache; /* last find_vma result */
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct vm_area_struct *area);
-	unsigned long mmap_base;		/* base of mmap area */
-	unsigned long free_area_cache;		/* first hole */
-	pml4_t * pml4;
-	atomic_t mm_users;			/* How many users with user space? */
-	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
-	int map_count;				/* number of VMAs */
+	void (*unmap_area)(struct vm_area_struct *area);
+	unsigned long mmap_base;	/* base of mmap area */
+	unsigned long free_area_cache;	/* first hole */
+	pml4_t *pml4;
+	atomic_t mm_users;		/* How many users with user space? */
+	atomic_t mm_count;		/* How many references to "struct mm_struct" (users count as 1) */
+	int map_count;			/* number of VMAs */
 	struct rw_semaphore mmap_sem;
-	spinlock_t page_table_lock;		/* Protects page tables, mm->rss, mm->anon_rss */
+	spinlock_t page_table_lock;	/* page tables, mm->rss, mm->anon_rss */
 
-	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
-						 * together off init_mm.mmlist, and are protected
-						 * by mmlist_lock
-						 */
+	struct list_head mmlist;	/* List of maybe swapped mm's.  These
+					 * are globally strung together off
+					 * init_mm.mmlist, and are protected
+					 * by mmlist_lock
+					 */
 
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
diff -puN mm/memory.c~4level-core-tweaks mm/memory.c
--- 25/mm/memory.c~4level-core-tweaks	2004-11-18 23:43:58.377281568 -0800
+++ 25-akpm/mm/memory.c	2004-11-18 23:43:58.388279896 -0800
@@ -180,7 +180,8 @@ static inline void free_one_pml4(struct 
  * This function is not exact and may clear less than the range if
  * addr and end are not suitably aligned.
  */
-void clear_page_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end)
+void clear_page_range(struct mmu_gather *tlb, unsigned long addr,
+			unsigned long end)
 {
 	int i;
 	pml4_t *pml4 = tlb->mm->pml4;
@@ -196,7 +197,8 @@ void clear_page_range(struct mmu_gather 
 	}
 }
 
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		struct page *new;
@@ -223,7 +225,8 @@ out:
 	return pte_offset_map(pmd, address);
 }
 
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		pte_t *new;
@@ -324,9 +327,10 @@ copy_one_pte(struct mm_struct *dst_mm,  
 	page_dup_rmap(page);
 }
 
-static int copy_pte_range(struct mm_struct *dst_mm,  struct mm_struct *src_mm,
-			   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
-			   unsigned long addr, unsigned long end)
+static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+				pmd_t *dst_pmd, pmd_t *src_pmd,
+				struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end)
 {
 	pte_t * src_pte, * dst_pte;
 	unsigned long vm_flags = vma->vm_flags;
@@ -351,9 +355,10 @@ static int copy_pte_range(struct mm_stru
 	return 0;
 }
 
-static int copy_pmd_range(struct mm_struct *dst_mm,  struct mm_struct *src_mm,
-			   pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
-			   unsigned long addr, unsigned long end)
+static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+				pgd_t *dst_pgd, pgd_t *src_pgd,
+				struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end)
 {
 	pmd_t *src_pmd, *dst_pmd;
 	int err = 0;
@@ -375,14 +380,15 @@ static int copy_pmd_range(struct mm_stru
 			pmd_clear(src_pmd);
 			continue;
 		}
-		err = copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, vma, addr, next);
+		err = copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, vma,
+					addr, next);
 		if (err)
 			break;
 	}
 	return err;
 }
 
-static int copy_pgd_range(struct mm_struct *dst_mm,  struct mm_struct *src_mm,
+static int copy_pgd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			   pml4_t *dst, pml4_t *src, struct vm_area_struct *vma,
 			   unsigned long addr, unsigned long end)
 {
@@ -414,7 +420,8 @@ static int copy_pgd_range(struct mm_stru
 	return err;
 }
 
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma)
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+			struct vm_area_struct *vma)
 {
 	pml4_t *src_pml4, *dst_pml4;
 	unsigned long addr, start, end, next;
@@ -454,9 +461,9 @@ int copy_page_range(struct mm_struct *ds
 	return err;
 }
 
-static void zap_pte_range(struct mmu_gather *tlb,
-		pmd_t *pmd, unsigned long address,
-		unsigned long size, struct zap_details *details)
+static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+			unsigned long address, unsigned long size,
+			struct zap_details *details)
 {
 	unsigned long offset;
 	pte_t *ptep;
@@ -537,9 +544,9 @@ static void zap_pte_range(struct mmu_gat
 	pte_unmap(ptep-1);
 }
 
-static void zap_pmd_range(struct mmu_gather *tlb,
-		pgd_t * dir, unsigned long address,
-		unsigned long size, struct zap_details *details)
+static void zap_pmd_range(struct mmu_gather *tlb, pgd_t *dir,
+		unsigned long address, unsigned long size,
+		struct zap_details *details)
 {
 	pmd_t * pmd;
 	unsigned long end;
@@ -562,9 +569,9 @@ static void zap_pmd_range(struct mmu_gat
 	} while (address && (address < end));
 }
 
-static void zap_pgd_range(struct mmu_gather *tlb,
-		pml4_t *pml4, unsigned long address,
-		unsigned long end, struct zap_details *details)
+static void zap_pgd_range(struct mmu_gather *tlb, pml4_t *pml4,
+		unsigned long address, unsigned long end,
+		struct zap_details *details)
 {
 	pgd_t * pgd;
 
@@ -583,9 +590,9 @@ static void zap_pgd_range(struct mmu_gat
 	} while (address && (address < end));
 }
 
-static void unmap_page_range(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned long end, struct zap_details *details)
+static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		unsigned long address, unsigned long end,
+		struct zap_details *details)
 {
 	unsigned long next;
 	pml4_t *pml4;
@@ -810,7 +817,6 @@ static inline struct page *get_page_map(
 	return page;
 }
 
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 			 unsigned long address)
@@ -975,7 +981,6 @@ int get_user_pages(struct task_struct *t
 out:
 	return i;
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
 static void zeromap_pte_range(pte_t * pte, unsigned long address,
@@ -988,7 +993,9 @@ static void zeromap_pte_range(pte_t * pt
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
-		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
+		pte_t zero_pte;
+
+		zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
 		BUG_ON(!pte_none(*pte));
 		set_pte(pte, zero_pte);
 		address += PAGE_SIZE;
@@ -996,8 +1003,8 @@ static void zeromap_pte_range(pte_t * pt
 	} while (address && (address < end));
 }
 
-static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
-                                    unsigned long size, pgprot_t prot)
+static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long address, unsigned long size, pgprot_t prot)
 {
 	unsigned long base, end;
 
@@ -1018,9 +1025,8 @@ static inline int zeromap_pmd_range(stru
 	return 0;
 }
 
-static inline int zeromap_pgd_range(struct mm_struct *mm, pgd_t * pgd,
-				    unsigned long address,
-                                    unsigned long size, pgprot_t prot)
+static inline int zeromap_pgd_range(struct mm_struct *mm, pgd_t *pgd,
+		unsigned long address, unsigned long size, pgprot_t prot)
 {
 	unsigned long base, end;
 
@@ -1038,7 +1044,8 @@ static inline int zeromap_pgd_range(stru
 	return 0;
 }
 
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
+int zeromap_page_range(struct vm_area_struct *vma, unsigned long address,
+			unsigned long size, pgprot_t prot)
 {
 	int i;
 	int error = 0;
@@ -1062,7 +1069,7 @@ int zeromap_page_range(struct vm_area_st
 		next = (address + PML4_SIZE) & PML4_MASK;
 		if (next <= beg || next > end)
 			next = end;
-		error = zeromap_pgd_range(mm, pgd, address, next - address, prot);
+		error = zeromap_pgd_range(mm, pgd, address, next-address, prot);
 		if (error)
 			break;
 		address = next;
@@ -1082,7 +1089,7 @@ int zeromap_page_range(struct vm_area_st
  * in null mappings (currently treated as "copy-on-access")
  */
 static inline void
-remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+remap_pte_range(pte_t *pte, unsigned long address, unsigned long size,
 		unsigned long pfn, pgprot_t prot)
 {
 	unsigned long end;
@@ -1126,7 +1133,7 @@ remap_pmd_range(struct mm_struct *mm, pm
 	return 0;
 }
 
-static inline int remap_pgd_range(struct mm_struct *mm, pgd_t * pgd,
+static inline int remap_pgd_range(struct mm_struct *mm, pgd_t *pgd,
 				  unsigned long address, unsigned long size,
 				  unsigned long pfn, pgprot_t prot)
 {
@@ -1150,8 +1157,7 @@ static inline int remap_pgd_range(struct
 
 /*  Note: this is only safe if the mm semaphore is held when called. */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-		    unsigned long pfn, unsigned long size,
-		    pgprot_t prot)
+		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
 	int error = 0;
 	pml4_t *pml4;
@@ -1201,7 +1207,6 @@ int remap_pfn_range(struct vm_area_struc
 
 	return error;
 }
-
 EXPORT_SYMBOL(remap_pfn_range);
 
 /*
@@ -1220,8 +1225,8 @@ static inline pte_t maybe_mkwrite(pte_t 
 /*
  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
  */
-static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
-		pte_t *page_table)
+static inline void break_cow(struct vm_area_struct *vma, struct page *new_page,
+				unsigned long address, pte_t *page_table)
 {
 	pte_t entry;
 
@@ -1237,9 +1242,6 @@ static inline void break_cow(struct vm_a
  * to a shared page. It is done by copying the page to a new address
  * and decrementing the shared-page counter for the old page.
  *
- * Goto-purists beware: the only reason for goto's here is that it results
- * in better assembly code.. The "default" path will see no jumps at all.
- *
  * Note that this routine assumes that the protection checks have been
  * done by the caller (the low-level page fault routine in most cases).
  * Thus we can safely just mark it writable once we've done any necessary
@@ -1252,7 +1254,7 @@ static inline void break_cow(struct vm_a
  * We hold the mm semaphore and the page_table_lock on entry and exit
  * with the page_table_lock released.
  */
-static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
+static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
 {
 	struct page *old_page, *new_page;
@@ -1279,8 +1281,9 @@ static int do_wp_page(struct mm_struct *
 		if (reuse) {
 			flush_cache_page(vma, address);
 			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
-					      vma);
-			ptep_set_access_flags(vma, address, page_table, entry, 1);
+						vma);
+			ptep_set_access_flags(vma, address, page_table,
+						entry, 1);
 			update_mmu_cache(vma, address, entry);
 			pte_unmap(page_table);
 			spin_unlock(&mm->page_table_lock);
@@ -1433,7 +1436,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  * between the file and the memory map for a potential last
  * incomplete page.  Ugly, but necessary.
  */
-int vmtruncate(struct inode * inode, loff_t offset)
+int vmtruncate(struct inode *inode, loff_t offset)
 {
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long limit;
@@ -1470,7 +1473,6 @@ out_big:
 out_busy:
 	return -ETXTBSY;
 }
-
 EXPORT_SYMBOL(vmtruncate);
 
 /* 
@@ -1484,7 +1486,8 @@ EXPORT_SYMBOL(vmtruncate);
  *
  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
  */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
+void swapin_readahead(swp_entry_t entry, unsigned long addr,
+			struct vm_area_struct *vma)
 {
 #ifdef CONFIG_NUMA
 	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
@@ -1533,9 +1536,9 @@ void swapin_readahead(swp_entry_t entry,
  * We hold the mm semaphore and the page_table_lock on entry and
  * should release the pagetable lock on exit..
  */
-static int do_swap_page(struct mm_struct * mm,
-	struct vm_area_struct * vma, unsigned long address,
-	pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
+static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		pte_t orig_pte, int write_access)
 {
 	struct page *page;
 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
@@ -1804,7 +1807,7 @@ oom:
  * from the encoded file_pte if possible. This enables swappable
  * nonlinear vmas.
  */
-static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
+static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
 {
 	unsigned long pgoff;
@@ -1826,7 +1829,8 @@ static int do_file_page(struct mm_struct
 	pte_unmap(pte);
 	spin_unlock(&mm->page_table_lock);
 
-	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
+	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
+					vma->vm_page_prot, pgoff, 0);
 	if (err == -ENOMEM)
 		return VM_FAULT_OOM;
 	if (err)
@@ -1856,7 +1860,7 @@ static int do_file_page(struct mm_struct
  * release it when done.
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
-	struct vm_area_struct * vma, unsigned long address,
+	struct vm_area_struct *vma, unsigned long address,
 	int write_access, pte_t *pte, pmd_t *pmd)
 {
 	pte_t entry;
@@ -1869,10 +1873,13 @@ static inline int handle_pte_fault(struc
 		 * drop the lock.
 		 */
 		if (pte_none(entry))
-			return do_no_page(mm, vma, address, write_access, pte, pmd);
+			return do_no_page(mm, vma, address, write_access,
+					pte, pmd);
 		if (pte_file(entry))
-			return do_file_page(mm, vma, address, write_access, pte, pmd);
-		return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
+			return do_file_page(mm, vma, address, write_access,
+					pte, pmd);
+		return do_swap_page(mm, vma, address, pte, pmd, entry,
+					write_access);
 	}
 
 	if (write_access) {
@@ -1921,10 +1928,11 @@ int handle_mm_fault(struct mm_struct *mm
 	if (pmd) {
 		pte_t * pte = pte_alloc_map(mm, pmd, address);
 		if (pte)
-			return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+			return handle_pte_fault(mm, vma, address, write_access,
+						pte, pmd);
 	}
 
- oom:
+oom:
 	spin_unlock(&mm->page_table_lock);
 	return VM_FAULT_OOM;
 }
@@ -1938,7 +1946,8 @@ int handle_mm_fault(struct mm_struct *mm
  * On a two-level page table, this ends up actually being entirely
  * optimized away.
  */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd,
+			unsigned long address)
 {
 	pmd_t *new;
 
@@ -1962,7 +1971,8 @@ out:
 }
 
 #if PTRS_PER_PML4 > 1
-pgd_t fastcall *__pgd_alloc(struct mm_struct *mm, pml4_t *pml4, unsigned long address)
+pgd_t fastcall *__pgd_alloc(struct mm_struct *mm, pml4_t *pml4,
+				unsigned long address)
 {
 	pgd_t *new;
 
@@ -2008,7 +2018,7 @@ int make_pages_present(unsigned long add
 /* 
  * Map a vmalloc()-space virtual address to the physical page.
  */
-struct page * vmalloc_to_page(void * vmalloc_addr)
+struct page *vmalloc_to_page(void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
 	struct page *page = NULL;
@@ -2033,17 +2043,15 @@ struct page * vmalloc_to_page(void * vma
 	}
 	return page;
 }
-
 EXPORT_SYMBOL(vmalloc_to_page);
 
 /*
  * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-unsigned long vmalloc_to_pfn(void * vmalloc_addr)
+unsigned long vmalloc_to_pfn(void *vmalloc_addr)
 {
 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
-
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
 #if !defined(CONFIG_ARCH_GATE_AREA)
diff -puN mm/msync.c~4level-core-tweaks mm/msync.c
--- 25/mm/msync.c~4level-core-tweaks	2004-11-18 23:43:58.378281416 -0800
+++ 25-akpm/mm/msync.c	2004-11-18 23:43:58.389279744 -0800
@@ -38,9 +38,9 @@ static int filemap_sync_pte(pte_t *ptep,
 	return 0;
 }
 
-static int filemap_sync_pte_range(pmd_t * pmd,
-	unsigned long address, unsigned long end, 
-	struct vm_area_struct *vma, unsigned int flags)
+static int filemap_sync_pte_range(pmd_t *pmd, unsigned long address,
+		unsigned long end, struct vm_area_struct *vma,
+		unsigned int flags)
 {
 	pte_t *pte;
 	int error;
@@ -67,9 +67,8 @@ static int filemap_sync_pte_range(pmd_t 
 	return error;
 }
 
-static inline int filemap_sync_pmd_range(pgd_t * pgd,
-	unsigned long address, unsigned long end, 
-	struct vm_area_struct *vma, unsigned int flags)
+static inline int filemap_sync_pmd_range(pgd_t *pgd, unsigned long address,
+	unsigned long end, struct vm_area_struct *vma, unsigned int flags)
 {
 	pmd_t * pmd;
 	int error;
@@ -93,9 +92,8 @@ static inline int filemap_sync_pmd_range
 	return error;
 }
 
-static inline int filemap_sync_pgd_range(pml4_t *pml4,
-	unsigned long address, unsigned long end,
-	struct vm_area_struct *vma, unsigned int flags)
+static inline int filemap_sync_pgd_range(pml4_t *pml4, unsigned long address,
+	unsigned long end, struct vm_area_struct *vma, unsigned int flags)
 {
 	pgd_t *pgd;
 	int error;
@@ -117,8 +115,8 @@ static inline int filemap_sync_pgd_range
 	return error;
 }
 
-static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
-	size_t size, unsigned int flags)
+static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
+			size_t size, unsigned int flags)
 {
 	pml4_t *pml4;
 	unsigned long end = address + size;
@@ -172,8 +170,8 @@ static int filemap_sync(struct vm_area_s
  * So my _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-static int msync_interval(struct vm_area_struct * vma,
-	unsigned long start, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, int flags)
 {
 	int ret = 0;
 	struct file * file = vma->vm_file;
_