patch-2.2.0-final linux/mm/vmscan.c

diff -u --recursive --new-file v2.2.0-pre9/linux/mm/vmscan.c linux/mm/vmscan.c
@@ -368,6 +368,54 @@
 }
 
 /*
+ * We need to make the locks finer granularity, but right
+ * now we need this so that we can do page allocations
+ * without holding the kernel lock etc.
+ *
+ * We want to try to free "count" pages, and we need to 
+ * cluster them so that we get good swap-out behaviour. See
+ * the "free_memory()" macro for details.
+ */
+static int do_try_to_free_pages(unsigned int gfp_mask)
+{
+	int priority;
+	int count = SWAP_CLUSTER_MAX;
+
+	lock_kernel();
+
+	/* Always trim SLAB caches when memory gets low. */
+	kmem_cache_reap(gfp_mask);
+
+	priority = 6;
+	do {
+		while (shrink_mmap(priority, gfp_mask)) {
+			if (!--count)
+				goto done;
+		}
+
+		/* Try to get rid of some shared memory pages.. */
+		if (gfp_mask & __GFP_IO) {
+			while (shm_swap(priority, gfp_mask)) {
+				if (!--count)
+					goto done;
+			}
+		}
+
+		/* Then, try to page stuff out.. */
+		while (swap_out(priority, gfp_mask)) {
+			if (!--count)
+				goto done;
+		}
+
+		shrink_dcache_memory(priority, gfp_mask);
+	} while (--priority >= 0);
+done:
+	unlock_kernel();
+
+	return priority >= 0;
+}
+
+/*
  * Before we start the kernel thread, print out the 
  * kswapd initialization message (otherwise the init message 
  * may be printed in the middle of another driver's init 
@@ -388,6 +436,8 @@
        printk ("Starting kswapd v%.*s\n", i, s);
 }
 
+static struct task_struct *kswapd_process;
+
 /*
  * The background pageout daemon, started as a kernel thread
  * from the init process. 
@@ -404,10 +454,13 @@
  */
 int kswapd(void *unused)
 {
-	current->session = 1;
-	current->pgrp = 1;
-	strcpy(current->comm, "kswapd");
-	sigfillset(&current->blocked);
+	struct task_struct *tsk = current;
+
+	kswapd_process = tsk;
+	tsk->session = 1;
+	tsk->pgrp = 1;
+	strcpy(tsk->comm, "kswapd");
+	sigfillset(&tsk->blocked);
 	
 	/*
 	 * Tell the memory management that we're a "memory allocator",
@@ -421,78 +474,52 @@
 	 * us from recursively trying to free more memory as we're
 	 * trying to free the first piece of memory in the first place).
 	 */
-	current->flags |= PF_MEMALLOC;
+	tsk->flags |= PF_MEMALLOC;
 
 	while (1) {
-		int tmo;
-
 		/*
 		 * Wake up once a second to see if we need to make
-		 * more memory available. When we get into a low
-		 * memory situation, we start waking up more often.
+		 * more memory available.
 		 *
-		 * We consider "freepages.low" to be low on memory,
-		 * but we also try to be aggressive if other processes
-		 * are low on memory and would otherwise block when
-		 * calling __get_free_page().
+		 * If we actually get into a low-memory situation,
+		 * the processes needing more memory will wake us
+		 * up on a more timely basis.
 		 */
-		tmo = HZ;
-		if (nr_free_pages < freepages.high) {
-			if (nr_free_pages < freepages.low || low_on_memory) {
-				if (try_to_free_pages(GFP_KSWAPD))
-					tmo = (HZ+9)/10;
-			}
-		}
+		do {
+			if (nr_free_pages >= freepages.high)
+				break;
+
+			if (!do_try_to_free_pages(GFP_KSWAPD))
+				break;
+		} while (!tsk->need_resched);
 		run_task_queue(&tq_disk);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(tmo);
+		tsk->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(HZ);
 	}
 }
 
 /*
- * We need to make the locks finer granularity, but right
- * now we need this so that we can do page allocations
- * without holding the kernel lock etc.
+ * Called by non-kswapd processes when they want more
+ * memory.
  *
- * We want to try to free "count" pages, and we need to 
- * cluster them so that we get good swap-out behaviour. See
- * the "free_memory()" macro for details.
+ * In a perfect world, this should just wake up kswapd
+ * and return. We don't actually want to swap stuff out
+ * from user processes, because the locking issues are
+ * nasty to the extreme (file write locks, and MM locking)
+ *
+ * One option might be to let kswapd do all the page-out
+ * and VM page table scanning that needs locking, and this
+ * process thread could do just the mmap shrink stage that
+ * can be done by just dropping cached pages without having
+ * any deadlock issues.
  */
 int try_to_free_pages(unsigned int gfp_mask)
 {
-	int priority;
-	int count = SWAP_CLUSTER_MAX;
-
-	lock_kernel();
-
-	/* Always trim SLAB caches when memory gets low. */
-	kmem_cache_reap(gfp_mask);
+	int retval = 1;
 
-	priority = 6;
-	do {
-		while (shrink_mmap(priority, gfp_mask)) {
-			if (!--count)
-				goto done;
-		}
-
-		/* Try to get rid of some shared memory pages.. */
-		if (gfp_mask & __GFP_IO) {
-			while (shm_swap(priority, gfp_mask)) {
-				if (!--count)
-					goto done;
-			}
-		}
-
-		/* Then, try to page stuff out.. */
-		while (swap_out(priority, gfp_mask)) {
-			if (!--count)
-				goto done;
-		}
-
-		shrink_dcache_memory(priority, gfp_mask);
-	} while (--priority >= 0);
-done:
-	unlock_kernel();
-
-	return priority >= 0;
+	wake_up_process(kswapd_process);
+	if (gfp_mask & __GFP_WAIT)
+		retval = do_try_to_free_pages(gfp_mask);
+	return retval;
 }
+	
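
For readers following the control flow rather than the diff context, here is a minimal user-space sketch of the priority/cluster loop that do_try_to_free_pages() introduces above. The reclaim_one() helper and the three fake pools are hypothetical stand-ins for shrink_mmap(), shm_swap() and swap_out(); the kmem_cache_reap()/shrink_dcache_memory() calls, the kernel lock and the __GFP_IO test are omitted. It only mirrors the loop structure: try the cheapest source first at each priority, escalate from priority 6 down to 0, and report success only if a full cluster of SWAP_CLUSTER_MAX pages came free.

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* free pages in clusters, as in the patch */

/* Fake page pools standing in for the page cache, SysV shared memory
 * and ordinary swappable process pages (all hypothetical). */
static int page_cache = 10, shm_pages = 5, swappable = 100;

/* Hypothetical stand-in for shrink_mmap()/shm_swap()/swap_out(): a pool
 * only gives up a page once the scan priority is low enough, i.e. once
 * we are willing to work hard enough for it. */
static int reclaim_one(int *pool, int needed_priority, int priority)
{
	if (*pool > 0 && priority <= needed_priority) {
		(*pool)--;
		return 1;
	}
	return 0;
}

static int sketch_do_try_to_free_pages(void)
{
	int priority = 6;		/* 6 = gentle, 0 = desperate */
	int count = SWAP_CLUSTER_MAX;

	do {
		/* Cheapest source first: page-cache pages. */
		while (reclaim_one(&page_cache, 6, priority))
			if (!--count)
				goto done;
		/* Then shared-memory pages... */
		while (reclaim_one(&shm_pages, 4, priority))
			if (!--count)
				goto done;
		/* ...and finally real swap-out, the most expensive path. */
		while (reclaim_one(&swappable, 3, priority))
			if (!--count)
				goto done;
	} while (--priority >= 0);
done:
	/* Success only if a whole cluster was freed before we gave up. */
	return priority >= 0;
}

int main(void)
{
	printf("freed a full cluster: %s\n",
	       sketch_do_try_to_free_pages() ? "yes" : "no");
	return 0;
}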
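
The second half of the patch splits reclaim work between kswapd and ordinary allocators: try_to_free_pages() now always wakes the daemon and only reclaims synchronously when the caller passes __GFP_WAIT. The sketch below models that contract in user space with a pthread standing in for kswapd and a fake free-page counter; none of it is kernel code, and wake_up_process() is approximated by a flag plus a condition variable.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define __GFP_WAIT	0x01	/* stand-in for the real gfp flag */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static int nr_free_pages = 16, freepages_high = 64;
static int need_reclaim, stop;

static void reclaim_some(const char *who)
{
	pthread_mutex_lock(&lock);
	nr_free_pages += 8;		/* pretend a cluster was freed */
	printf("%s: now %d pages free\n", who, nr_free_pages);
	pthread_mutex_unlock(&lock);
}

/* Background daemon: sleep until kicked, then reclaim until the
 * free-page count looks comfortable again. */
static void *kswapd(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!need_reclaim && !stop)
			pthread_cond_wait(&kick, &lock);
		need_reclaim = 0;
		while (!stop && nr_free_pages < freepages_high) {
			pthread_mutex_unlock(&lock);
			reclaim_some("kswapd");
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int try_to_free_pages(unsigned int gfp_mask)
{
	pthread_mutex_lock(&lock);
	need_reclaim = 1;		/* always wake the daemon... */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);

	if (gfp_mask & __GFP_WAIT)	/* ...and help out if we may sleep */
		reclaim_some("caller");
	return 1;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, kswapd, NULL);
	try_to_free_pages(__GFP_WAIT);	/* sleeping caller reclaims too */
	try_to_free_pages(0);		/* atomic caller only kicks kswapd */
	sleep(1);			/* give the daemon time to run */

	pthread_mutex_lock(&lock);
	stop = 1;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}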
