diff -purN -X /home/mbligh/.diff.exclude 770-mmu_context_to_struct/arch/ppc64/kernel/setup.c 780-hugetlb_dyn_as/arch/ppc64/kernel/setup.c
--- 770-mmu_context_to_struct/arch/ppc64/kernel/setup.c	2004-02-18 14:56:52.000000000 -0800
+++ 780-hugetlb_dyn_as/arch/ppc64/kernel/setup.c	2004-02-20 15:59:49.000000000 -0800
@@ -618,6 +618,10 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = klimit;
+#ifdef CONFIG_HUGETLB_PAGE
+	init_mm.context.hugetlb_end = TASK_HPAGE_END_32;
+	init_mm.context.hugetlb_base = TASK_HPAGE_END_32;
+#endif
 	
 	/* Save unparsed command line copy for /proc/cmdline */
 	strcpy(saved_command_line, cmd_line);
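
Setting both fields to TASK_HPAGE_END_32 gives init_mm an empty hugepage window: the window is the half-open interval [hugetlb_base, hugetlb_end), opened per-process later and grown strictly downwards. A minimal sketch of that invariant (hugetlb_window_empty is a hypothetical helper, not kernel API):

static inline int hugetlb_window_empty(const mm_context_t *ctx)
{
	/* base == end denotes an empty window; growth only lowers base */
	return ctx->hugetlb_base == ctx->hugetlb_end;
}
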
diff -purN -X /home/mbligh/.diff.exclude 770-mmu_context_to_struct/arch/ppc64/mm/hugetlbpage.c 780-hugetlb_dyn_as/arch/ppc64/mm/hugetlbpage.c
--- 770-mmu_context_to_struct/arch/ppc64/mm/hugetlbpage.c	2004-02-20 15:59:46.000000000 -0800
+++ 780-hugetlb_dyn_as/arch/ppc64/mm/hugetlbpage.c	2004-02-20 15:59:49.000000000 -0800
@@ -22,6 +22,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/tlb.h>
@@ -237,6 +238,25 @@ static void do_slbia(void *unused)
 	asm volatile ("isync; slbia; isync":::"memory");
 }
 
+/* Returns the correct ending address for a process's hugetlb region */
+static inline unsigned long
+get_hugetlb_area_end(struct task_struct *task)
+{
+	unsigned long stack_end, default_end = TASK_HPAGE_END_32;
+
+	/*
+	 * We use rlim_cur so that unprivileged applications can signal
+	 * our code using ulimit.  Clamp the limit so an RLIM_INFINITY
+	 * stack rlimit cannot underflow the subtraction below.
+	 */
+	stack_end = 0xffffffffUL - min(task->rlim[RLIMIT_STACK].rlim_cur,
+				       0xffffffffUL - TASK_HPAGE_BASE_32);
+	stack_end = min(default_end, stack_end);
+
+	/* Boundary must be segment aligned */
+	return _ALIGN_DOWN(stack_end, MM_SEGMENT_SIZE);
+}
+
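
To make the rlimit arithmetic concrete, here is a userspace model of get_hugetlb_area_end() (a sketch only: ALIGN_DOWN and hugetlb_area_end are local stand-ins for the kernel's _ALIGN_DOWN and the function above; the constants are copied from the patch):

#include <stdio.h>

#define MM_SEGMENT_SIZE    (1UL << 28)		/* 256MB ppc64 segment */
#define TASK_HPAGE_END_32  (0xf0000000UL)
#define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))

/* Ending address of the hugepage window for a given stack rlimit */
static unsigned long hugetlb_area_end(unsigned long stack_rlim)
{
	unsigned long stack_end = 0xffffffffUL - stack_rlim;

	if (stack_end > TASK_HPAGE_END_32)
		stack_end = TASK_HPAGE_END_32;
	return ALIGN_DOWN(stack_end, MM_SEGMENT_SIZE);
}

int main(void)
{
	/* Default 8MB stack: the window may extend up to 0xf0000000 */
	printf("%lx\n", hugetlb_area_end(8UL << 20));	/* f0000000 */
	/* After `ulimit -s 1048576` (1GB): ceiling drops to 0xb0000000 */
	printf("%lx\n", hugetlb_area_end(1UL << 30));	/* b0000000 */
	return 0;
}

So the default 8MB stack leaves the whole window available, while a 1GB ulimit pushes the ceiling down by four 256MB segments.
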
 /* Activate the low hpage region for 32bit processes.  mmap_sem must
  * be held*/
 static int open_32bit_htlbpage_range(struct mm_struct *mm)
@@ -247,15 +267,19 @@ static int open_32bit_htlbpage_range(str
 	if (mm->context.cid & CONTEXT_LOW_HPAGES)
 		return 0; /* The window is already open */
 	
+	/* Set up the area boundaries */
+	mm->context.hugetlb_end = get_hugetlb_area_end(current);
+	mm->context.hugetlb_base = mm->context.hugetlb_end;
+
 	/* Check no VMAs are in the region */
-	vma = find_vma(mm, TASK_HPAGE_BASE_32);
+	vma = find_vma(mm, mm->context.hugetlb_base);
 
-	if (vma && (vma->vm_start < TASK_HPAGE_END_32))
+	if (vma && (vma->vm_start < mm->context.hugetlb_end))
 		return -EBUSY;
 
 	/* Clean up any leftover PTE pages in the region */
 	spin_lock(&mm->page_table_lock);
-	for (addr = TASK_HPAGE_BASE_32; addr < TASK_HPAGE_END_32;
+	for (addr = mm->context.hugetlb_base; addr < mm->context.hugetlb_end;
 	     addr += PMD_SIZE) {
 		pgd_t *pgd = pgd_offset(mm, addr);
 		pmd_t *pmd = pmd_offset(pgd, addr);
@@ -572,8 +596,8 @@ full_search:
 		}
 		if (!vma || addr + len <= vma->vm_start) {
 			if (is_hugepage_only_range(addr, len)) {
-				if (addr < TASK_HPAGE_END_32)
-					addr = TASK_HPAGE_END_32;
+				if (addr < mm->context.hugetlb_end)
+					addr = mm->context.hugetlb_end;
 				else
 					addr = TASK_HPAGE_END;
 
@@ -590,6 +614,36 @@ full_search:
 }
 
 
+static unsigned long
+grow_hugetlb_region(unsigned long hpage_base, unsigned long len)
+{
+	struct vm_area_struct *vma = NULL;
+	unsigned long new_base, vma_start = hpage_base;
+
+	vma = find_vma(current->mm, vma_start);
+	vma_start = (vma && vma->vm_start < current->mm->context.hugetlb_end) ?
+		vma->vm_start : current->mm->context.hugetlb_end;
+	printk(KERN_DEBUG "First vma in hugetlb region starts at: %lx\n", vma_start);
+
+	/* Guard against underflow before aligning the new base downwards */
+	if (vma_start < len)
+		return -ENOMEM;
+	new_base = _ALIGN_DOWN(vma_start - len, MM_SEGMENT_SIZE);
+	if (new_base < TASK_HPAGE_BASE_32)
+		return -ENOMEM;
+
+	printk(KERN_DEBUG "Try to move hugetlb_base down to: %lx\n", new_base);
+	vma = find_vma(current->mm, new_base);
+	if (vma && vma->vm_start < hpage_base) {
+		printk(KERN_DEBUG "Found vma at %lx, aborting\n", vma->vm_start);
+		return -ENOMEM;
+	}
+
+	current->mm->context.hugetlb_base = new_base;
+	printk(KERN_DEBUG "Area clean, returning an area at: %lx\n", vma_start - len);
+	return vma_start - len;
+}
+
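
A worked example of the growth step (numbers illustrative): with the window at [0xc0000000, 0xf0000000) fully occupied, a 512MB request computes 0xc0000000 - 0x20000000 = 0xa0000000, which is already segment aligned, so the mapping lands at 0xa0000000 and hugetlb_base drops to match. A compilable model under those assumptions (model_grow is hypothetical, not kernel code):

#include <errno.h>
#include <stdio.h>

#define MM_SEGMENT_SIZE    (1UL << 28)
#define TASK_HPAGE_BASE_32 (0x40000000UL)

/* `lowest` is the lowest in-use address in the current window;
 * assumes no normal-page vmas sit below it. */
static long model_grow(unsigned long lowest, unsigned long len,
		       unsigned long *base)
{
	unsigned long new_base;

	if (lowest < len)
		return -ENOMEM;		/* subtraction would underflow */
	new_base = (lowest - len) & ~(MM_SEGMENT_SIZE - 1);
	if (new_base < TASK_HPAGE_BASE_32)
		return -ENOMEM;		/* would cross the window floor */
	*base = new_base;
	return (long)(lowest - len);
}

int main(void)
{
	unsigned long base = 0xc0000000UL;
	long addr = model_grow(0xc0000000UL, 0x20000000UL, &base);

	/* prints "addr a0000000, base a0000000" */
	printf("addr %lx, base %lx\n", (unsigned long)addr, base);
	return 0;
}
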
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
@@ -610,8 +664,8 @@ unsigned long hugetlb_get_unmapped_area(
 		if (err)
 			return err; /* Should this just be EINVAL? */
 
-		base = TASK_HPAGE_BASE_32;
-		end = TASK_HPAGE_END_32;
+		base = current->mm->context.hugetlb_base;
+		end = current->mm->context.hugetlb_end;
 	} else {
 		base = TASK_HPAGE_BASE;
 		end = TASK_HPAGE_END;
@@ -624,7 +678,7 @@
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (addr + len > end)
-			return -ENOMEM;
+			break; /* We couldn't find an area */
 		if (!vma || (addr + len) <= vma->vm_start)
 			return addr;
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
@@ -633,6 +687,8 @@
 		 * this alignment shouldn't have skipped over any
 		 * other vmas */
 	}
+	/* Get the space by expanding the hugetlb region */
+	return grow_hugetlb_region(base, len);
 }
 
 static inline unsigned long computeHugeHptePP(unsigned int hugepte)
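
Taken together, hugetlb_get_unmapped_area() first does a first-fit scan of the current window at HPAGE_SIZE alignment and, rather than failing when the scan runs off the end, now falls back to grow_hugetlb_region(). A userspace exercise of that path (a sketch assuming a hugetlbfs mount at /mnt/huge and a 32-bit binary; 16MB matches LARGE_PAGE_SHIFT of 24):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	void *p;

	if (fd < 0)
		return 1;
	/* Let the kernel pick the address: it lands inside the dynamic
	 * window, which grows downwards a 256MB segment at a time as
	 * mappings accumulate. */
	p = mmap(NULL, 16UL << 20, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 0);
	if (p != MAP_FAILED)
		printf("hugepage mapping at %p\n", p);
	close(fd);
	return 0;
}
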
diff -purN -X /home/mbligh/.diff.exclude 770-mmu_context_to_struct/include/asm-ppc64/mmu.h 780-hugetlb_dyn_as/include/asm-ppc64/mmu.h
--- 770-mmu_context_to_struct/include/asm-ppc64/mmu.h	2004-02-20 15:59:46.000000000 -0800
+++ 780-hugetlb_dyn_as/include/asm-ppc64/mmu.h	2004-02-20 15:59:49.000000000 -0800
@@ -21,6 +21,10 @@
 /* Time to allow for more things here */
 typedef struct {
 	unsigned long cid;
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long hugetlb_base;
+	unsigned long hugetlb_end;
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -183,6 +187,8 @@ void create_valid_hpte( unsigned long sl
 
 #define LARGE_PAGE_SHIFT 24
 
+#define MM_SEGMENT_SIZE (1UL << 28)	/* 256MB ppc64 MMU segment */
+
 static inline unsigned long hpt_hash(unsigned long vpn, int large)
 {
 	unsigned long vsid;
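
MM_SEGMENT_SIZE matches the 256MB MMU segment on ppc64; huge pages are switched on per segment, so the window boundaries only ever move in these units. A tiny sketch (segment_index is a hypothetical helper, not kernel API):

/* 1UL << 28 == 0x10000000 == 256MB; on a 32-bit address the segment
 * is the top nibble. */
static inline unsigned long segment_index(unsigned long addr)
{
	return addr >> 28;	/* 0x40000000 -> 4, 0xf0000000 -> 15 */
}
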
diff -purN -X /home/mbligh/.diff.exclude 770-mmu_context_to_struct/include/asm-ppc64/page.h 780-hugetlb_dyn_as/include/asm-ppc64/page.h
--- 770-mmu_context_to_struct/include/asm-ppc64/page.h	2004-02-20 15:59:46.000000000 -0800
+++ 780-hugetlb_dyn_as/include/asm-ppc64/page.h	2004-02-20 15:59:49.000000000 -0800
@@ -32,15 +32,22 @@
 /* For 64-bit processes the hugepage range is 1T-1.5T */
 #define TASK_HPAGE_BASE 	(0x0000010000000000UL)
 #define TASK_HPAGE_END 	(0x0000018000000000UL)
-/* For 32-bit processes the hugepage range is 2-3G */
-#define TASK_HPAGE_BASE_32	(0x80000000UL)
-#define TASK_HPAGE_END_32	(0xc0000000UL)
 
+/*
+ * We have much greater contention for segments in a
+ * 32-bit address space.  Therefore, the region reserved
+ * for huge pages is dynamically resized.  These values
+ * define the maximum range allowed for huge pages.
+ */
+#define TASK_HPAGE_BASE_32	(0x40000000UL)
+#define TASK_HPAGE_END_32	(0xf0000000UL)
+
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(addr, len) \
 	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
 	  ((current->mm->context.cid & CONTEXT_LOW_HPAGES) && \
-	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+	   ((addr) > (current->mm->context.hugetlb_base-(len))) && \
+	   ((addr) < current->mm->context.hugetlb_end)) )
 #define hugetlb_free_pgtables free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
@@ -48,7 +55,7 @@
 	((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
 	 ((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
 	  (((context.cid) & CONTEXT_LOW_HPAGES) && \
-	   (((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
+	   (((addr) >= context.hugetlb_base) && ((addr) < context.hugetlb_end)))))
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
diff -purN -X /home/mbligh/.diff.exclude 770-mmu_context_to_struct/include/linux/sched.h 780-hugetlb_dyn_as/include/linux/sched.h
--- 770-mmu_context_to_struct/include/linux/sched.h	2004-02-20 15:58:32.000000000 -0800
+++ 780-hugetlb_dyn_as/include/linux/sched.h	2004-02-20 16:00:29.000000000 -0800
@@ -509,6 +509,7 @@ struct task_struct {
  * to a stack based synchronous wait) if its doing sync IO.
  */
 	wait_queue_t *io_wait;
+	unsigned long fault_count;
 };
 
 static inline pid_t process_group(struct task_struct *tsk)