patch-2.1.37 linux/mm/page_alloc.c

diff -u --recursive --new-file v2.1.36/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -18,12 +18,14 @@
 #include <linux/fs.h>
 #include <linux/swapctl.h>
 #include <linux/interrupt.h>
+#include <linux/init.h>
 
 #include <asm/dma.h>
 #include <asm/system.h> /* for cli()/sti() */
 #include <asm/uaccess.h> /* for copy_to/from_user */
 #include <asm/bitops.h>
 #include <asm/pgtable.h>
+#include <asm/spinlock.h>
 
 int nr_swap_pages = 0;
 int nr_free_pages = 0;
@@ -88,10 +90,6 @@
  *
  * With the above two rules, you get a straight-line execution path
  * for the normal case, giving better asm-code.
- *
- * free_page() may sleep since the page being freed may be a buffer
- * page or present in the swap cache. It will not sleep, however,
- * for a freshly allocated page (get_free_page()).
  */
 
 /*
@@ -99,6 +97,8 @@
  *
  * Hint: -mask = 1+~mask
  */
+static spinlock_t page_alloc_lock;
+
 static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
 {
 	struct free_area_struct *area = free_area + order;
@@ -106,15 +106,14 @@
 	unsigned long mask = (~0UL) << order;
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&page_alloc_lock, flags);
 
 #define list(x) (mem_map+(x))
 
 	map_nr &= mask;
 	nr_free_pages -= mask;
 	while (mask + (1 << (NR_MEM_LISTS-1))) {
-		if (!change_bit(index, area->map))
+		if (!test_and_change_bit(index, area->map))
 			break;
 		remove_mem_queue(list(map_nr ^ -mask));
 		mask <<= 1;
@@ -126,7 +125,7 @@
 
 #undef list
 
-	restore_flags(flags);
+	spin_unlock_irqrestore(&page_alloc_lock, flags);
 }
 
 void __free_page(struct page *page)
@@ -172,7 +171,7 @@
 				MARK_USED(map_nr, new_order, area); \
 				nr_free_pages -= 1 << order; \
 				EXPAND(ret, map_nr, order, new_order, area); \
-				restore_flags(flags); \
+				spin_unlock_irqrestore(&page_alloc_lock, flags); \
 				return ADDRESS(map_nr); \
 			} \
 			prev = ret; \
@@ -214,15 +213,14 @@
 	reserved_pages = 5;
 	if (priority != GFP_NFS)
 		reserved_pages = min_free_pages;
-	save_flags(flags);
 repeat:
-	cli();
+	spin_lock_irqsave(&page_alloc_lock, flags);
 	if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
 		RMQUEUE(order, dma);
-		restore_flags(flags);
+		spin_unlock_irqrestore(&page_alloc_lock, flags);
 		return 0;
 	}
-	restore_flags(flags);
+	spin_unlock_irqrestore(&page_alloc_lock, flags);
 	if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
 		goto repeat;
 	return 0;
@@ -239,8 +237,7 @@
  	unsigned long total = 0;
 
 	printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&page_alloc_lock, flags);
  	for (order=0 ; order < NR_MEM_LISTS; order++) {
 		struct page * tmp;
 		unsigned long nr = 0;
@@ -250,7 +247,7 @@
 		total += nr * ((PAGE_SIZE>>10) << order);
 		printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order));
 	}
-	restore_flags(flags);
+	spin_unlock_irqrestore(&page_alloc_lock, flags);
 	printk("= %lukB)\n", total);
 #ifdef SWAP_CACHE_INFO
 	show_swap_cache_info();
@@ -265,7 +262,7 @@
  *   - mark all memory queues empty
  *   - clear the memory bitmaps
  */
-unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
+__initfunc(unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem))
 {
 	mem_map_t * p;
 	unsigned long mask = PAGE_MASK;
@@ -273,7 +270,7 @@
 
 	/*
 	 * select nr of pages we try to keep free for important stuff
-	 * with a minimum of 16 pages. This is totally arbitrary
+	 * with a minimum of 48 pages. This is totally arbitrary
 	 */
 	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
 	if (i < 48)

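The recurring edit in the hunks above replaces save_flags()/cli()/restore_flags() critical sections with a single page_alloc_lock taken via spin_lock_irqsave(), which keeps interrupts disabled locally while also serializing against other CPUs on SMP. Below is a minimal before/after sketch of that pattern, not part of the patch itself: the counter, lock and function names are hypothetical, and only the locking primitives come from the diff.

/*
 * Illustrative sketch of the locking conversion performed in
 * free_pages_ok(), __get_free_pages() and show_free_areas().
 * Hypothetical names; only the locking calls mirror the patch.
 */
#include <asm/system.h>		/* save_flags(), cli(), restore_flags() */
#include <asm/spinlock.h>	/* spinlock_t, spin_lock_irqsave() */

static int hypothetical_counter;
static spinlock_t hypothetical_lock;	/* plays the role of page_alloc_lock */

/* Old pattern: disable interrupts on the local CPU only. */
static void bump_old(void)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	hypothetical_counter++;
	restore_flags(flags);
}

/*
 * New pattern: take a spinlock with interrupts disabled, so the
 * critical section is also safe against other CPUs on SMP; on
 * uniprocessor builds the spinlock compiles away and this reduces
 * to the old cli()-style section.
 */
static void bump_new(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hypothetical_lock, flags);
	hypothetical_counter++;
	spin_unlock_irqrestore(&hypothetical_lock, flags);
}

Two smaller changes ride along in the same diff: change_bit() becomes test_and_change_bit(), the variant whose return value reports the old bit state (which the buddy-coalescing loop relies on), and free_area_init() is wrapped in __initfunc() from <linux/init.h>, so its code can be placed in the init section and discarded once booting is complete.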