On 32-bit builds we get

kernel/built-in.o: In function `load_balance':
/tmp/distcc_1108/kernel/sched.c:1188: undefined reference to `__udivdi3'

because CAN_MIGRATE_TASK's use of NS_TO_JIFFIES requires a 64-bit division,
which gcc turns into a call to libgcc's __udivdi3 helper that the kernel does
not provide.
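
As a stand-alone illustration of the failure (the HZ value, the macro body and
the helper name below are assumptions for the sketch, not copied from
sched.c): on a 32-bit target gcc lowers the 64-bit division inside
NS_TO_JIFFIES to a libgcc call, which the kernel does not link against.

#define HZ			1000	/* assumed for the sketch */
#define NS_TO_JIFFIES(t)	((t) / (1000000000 / HZ))

/* 64-bit dividend on a 32-bit build: gcc emits a call to __udivdi3 here */
int task_is_cache_cold(unsigned long long now, unsigned long long timestamp,
		       unsigned long cache_decay_ticks)
{
	return NS_TO_JIFFIES(now - timestamp) > cache_decay_ticks;
}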

Fix that by converting the right-hand side of the comparison with
JIFFIES_TO_NS instead, so the only 64-bit operation is a multiply, and rework
the test from a macro into an inline can_migrate_task() helper to make the
code more understandable.
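
With the conversion moved to the right-hand side, the only 64-bit operation
left in the test is a multiply, which needs no libgcc helper (same assumed
macro style and hypothetical helper name as the sketch above):

#define HZ			1000	/* assumed for the sketch */
#define JIFFIES_TO_NS(j)	((unsigned long long)(j) * (1000000000 / HZ))

/* 64-bit multiply and compare only: links fine on 32-bit builds */
int task_is_cache_cold(unsigned long long now, unsigned long long timestamp,
		       unsigned long cache_decay_ticks)
{
	return now - timestamp > JIFFIES_TO_NS(cache_decay_ticks);
}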



 kernel/sched.c |   32 ++++++++++++++++++++++++--------
 1 files changed, 24 insertions(+), 8 deletions(-)

diff -puN kernel/sched.c~o14int-div-fix kernel/sched.c
--- 25/kernel/sched.c~o14int-div-fix	2003-08-30 15:42:03.000000000 -0700
+++ 25-akpm/kernel/sched.c	2003-08-30 15:42:03.000000000 -0700
@@ -1129,6 +1129,29 @@ static inline void pull_task(runqueue_t 
 }
 
 /*
+ * Previously:
+ *
+ * #define CAN_MIGRATE_TASK(p,rq,this_cpu)	\
+ *	((idle || (NS_TO_JIFFIES(now - (p)->timestamp) > \
+ *		cache_decay_ticks)) && !task_running(rq, p) && \
+ *			cpu_isset(this_cpu, (p)->cpus_allowed))
+ */
+
+static inline int
+can_migrate_task(task_t *tsk, runqueue_t *rq, int this_cpu, int idle)
+{
+	unsigned long delta = sched_clock() - tsk->timestamp;
+
+	if (!idle && (delta <= JIFFIES_TO_NS(cache_decay_ticks)))
+		return 0;
+	if (task_running(rq, tsk))
+		return 0;
+	if (!cpu_isset(this_cpu, tsk->cpus_allowed))
+		return 0;
+	return 1;
+}
+
+/*
  * Current runqueue is empty, or rebalance tick: if there is an
  * inbalance (current runqueue is too short) then pull from
  * busiest runqueue(s).
@@ -1142,14 +1165,12 @@ static void load_balance(runqueue_t *thi
 	runqueue_t *busiest;
 	prio_array_t *array;
 	struct list_head *head, *curr;
-	unsigned long long now;
 	task_t *tmp;
 
 	busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance, cpumask);
 	if (!busiest)
 		goto out;
 
-	now = sched_clock();
 	/*
 	 * We only want to steal a number of tasks equal to 1/2 the imbalance,
 	 * otherwise we'll just shift the imbalance to the new queue:
@@ -1195,14 +1216,9 @@ skip_queue:
 	 * 3) are cache-hot on their current CPU.
 	 */
 
-#define CAN_MIGRATE_TASK(p,rq,this_cpu)	\
-	((idle || (NS_TO_JIFFIES(now - (p)->timestamp) > \
-		cache_decay_ticks)) && !task_running(rq, p) && \
-			cpu_isset(this_cpu, (p)->cpus_allowed))
-
 	curr = curr->prev;
 
-	if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
+	if (!can_migrate_task(tmp, busiest, this_cpu, idle)) {
 		if (curr != head)
 			goto skip_queue;
 		idx++;

_