patch-1.3.90 linux/ipc/shm.c

diff -u --recursive --new-file v1.3.89/linux/ipc/shm.c linux/ipc/shm.c
@@ -421,6 +421,7 @@
 	pmd_t *page_middle;
 	pte_t *page_table;
 	unsigned long tmp, shm_sgn;
+	int error;
 
 	/* clear old mappings */
 	do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
@@ -431,6 +432,7 @@
 	merge_segments(current, shmd->vm_start, shmd->vm_end);
 
 	/* map page range */
+	error = 0;
 	shm_sgn = shmd->vm_pte +
 	  SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
 	flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
@@ -440,11 +442,15 @@
 	{
 		page_dir = pgd_offset(shmd->vm_mm,tmp);
 		page_middle = pmd_alloc(page_dir,tmp);
-		if (!page_middle)
-			return -ENOMEM;
+		if (!page_middle) {
+			error = -ENOMEM;
+			break;
+		}
 		page_table = pte_alloc(page_middle,tmp);
-		if (!page_table)
-			return -ENOMEM;
+		if (!page_table) {
+			error = -ENOMEM;
+			break;
+		}
 		set_pte(page_table, __pte(shm_sgn));
 	}
 	flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
@@ -712,7 +718,7 @@
 	pte_val(page) = shp->shm_pages[idx];
 	if (!pte_present(page))
 		goto check_table;
-	if (dma && !PageDMA(MAP_NR(pte_page(page)) + mem_map))
+	if (dma && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
 		goto check_table;
 	swap_attempts++;
 
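
The first hunk changes the mapping loop so that an allocation failure no longer returns straight out of the function: the error is recorded and the loop is left with break, so the flush_tlb_range() call after the loop is still reached before the error is reported. A minimal user-space sketch of that pattern, with hypothetical alloc_entry()/flush_all() helpers standing in for the page-table allocators and the TLB flush (names and values are made up for illustration, not kernel APIs):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for pmd_alloc()/pte_alloc() and
	 * flush_tlb_range(); the names are placeholders, not kernel APIs. */
	static int alloc_entry(unsigned long addr)
	{
		return addr < 0x3000;	/* pretend allocation fails past a point */
	}

	static void flush_all(void)
	{
		puts("flush");		/* stands in for flush_tlb_range() */
	}

	/* The pattern the hunk introduces: on allocation failure, record the
	 * error and break out of the loop rather than returning on the spot,
	 * so the flush after the loop still runs before the error is passed
	 * back to the caller. */
	static int map_range(unsigned long start, unsigned long end, unsigned long step)
	{
		int error = 0;
		unsigned long addr;

		for (addr = start; addr < end; addr += step) {
			if (!alloc_entry(addr)) {
				error = -ENOMEM;
				break;	/* fall through to the flush below */
			}
		}
		flush_all();		/* always reached, even on error */
		return error;
	}

	int main(void)
	{
		return map_range(0x1000, 0x5000, 0x1000) == -ENOMEM ? 0 : 1;
	}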

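The second hunk rewrites the mem_map lookup from pointer-arithmetic form to array-index form; in C both expressions name the address of the same mem_map entry, so the change is purely one of spelling. A toy sketch of that equivalence (struct page_stub and the array size are invented for illustration, not the kernel's definitions):

	#include <assert.h>

	/* Toy stand-in for the kernel's mem_map array; the real struct page
	 * is unrelated, this only illustrates the pointer arithmetic. */
	struct page_stub { int flags; };
	static struct page_stub mem_map[16];

	int main(void)
	{
		unsigned long idx = 7;	/* plays the role of MAP_NR(pte_page(page)) */

		/* idx + mem_map and &mem_map[idx] denote the same element;
		 * the hunk above only switches to the array-index spelling. */
		assert((idx + mem_map) == &mem_map[idx]);
		return 0;
	}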