From: Nick Piggin <piggin@cyberone.com.au>

In shrink_slab(), do the multiply before the divide so the integer division
by shrinker->seeks does not throw away the remainder before it gets scaled
up.  The `scanned' argument also becomes unsigned long.
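
As a quick standalone illustration of the truncation (a minimal sketch,
not kernel code; the values for scanned and seeks are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long scanned = 37;	/* hypothetical pages scanned */
		unsigned long seeks = 4;	/* hypothetical shrinker->seeks */

		/* Before: divide first, so the remainder is discarded and
		 * the rounding error is then multiplied by 4.
		 */
		unsigned long long before = 4 * (scanned / seeks);	/* 4 * 9 = 36 */

		/* After: multiply first, divide once at the end. */
		unsigned long long after = 4 * scanned / seeks;		/* 148 / 4 = 37 */

		printf("before=%llu after=%llu\n", before, after);
		return 0;
	}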



---

 mm/vmscan.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff -puN mm/vmscan.c~shrink_slab-precision-fix mm/vmscan.c
--- 25/mm/vmscan.c~shrink_slab-precision-fix	2004-02-27 01:57:38.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2004-02-27 01:57:38.000000000 -0800
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  */
-static int shrink_slab(long scanned, unsigned int gfp_mask)
+static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
 {
 	struct shrinker *shrinker;
 	long pages;
@@ -149,7 +149,7 @@ static int shrink_slab(long scanned, uns
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 
-		delta = 4 * (scanned / shrinker->seeks);
+		delta = 4 * scanned / shrinker->seeks;
 		delta *= (*shrinker->shrinker)(0, gfp_mask);
 		do_div(delta, pages + 1);
 		shrinker->nr += delta;

_