From: Con Kolivas <kernel@kolivas.org>

We can requeue tasks more cheaply than by doing a complete dequeue followed
by an enqueue.  Add a requeue_task() function and use it where possible.

This path will be hit frequently by upcoming changes that requeue tasks at
timeslice granularity.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/kernel/sched.c |   22 ++++++++++++++++++----
 1 files changed, 18 insertions(+), 4 deletions(-)
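
For background, the saving comes from list_move_tail(): requeueing is a
single unlink-and-splice on the task's run_list, whereas dequeue_task()
followed by enqueue_task() also maintains array->nr_active and the priority
bitmap on both calls, work that is redundant when the task stays in the
same array at the same priority.  Below is a minimal standalone sketch of
the primitive, using a userspace mock of the kernel's list helpers (the
names mirror include/linux/list.h, but none of this is the actual
scheduler or list code):

#include <stdio.h>
#include <stddef.h>

/*
 * Standalone mock of the kernel's circular doubly-linked list.  The
 * names follow include/linux/list.h for familiarity; this is purely
 * illustrative.
 */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/*
 * The requeue primitive: unlink the node and splice it at the tail in
 * one pass, with no bookkeeping beyond the pointer updates above.
 */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add_tail(entry, head);
}

struct task {
	int pid;
	struct list_head run_list;
};

int main(void)
{
	struct list_head queue;
	struct task a = { .pid = 1 }, b = { .pid = 2 }, c = { .pid = 3 };
	struct list_head *pos;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&a.run_list, &queue);
	list_add_tail(&b.run_list, &queue);
	list_add_tail(&c.run_list, &queue);

	/* "Requeue" task 1: move it from the head to the tail. */
	list_move_tail(&a.run_list, &queue);

	for (pos = queue.next; pos != &queue; pos = pos->next) {
		struct task *t = (struct task *)((char *)pos -
				offsetof(struct task, run_list));
		printf("pid %d\n", t->pid);	/* prints 2, 3, 1 */
	}
	return 0;
}

Run, it prints pids 2, 3 and then 1: the task reaches the tail without
ever leaving the list or touching any per-array accounting twice.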

diff -puN kernel/sched.c~sched-add_requeue_task kernel/sched.c
--- 25/kernel/sched.c~sched-add_requeue_task	2004-12-03 20:56:31.447979368 -0800
+++ 25-akpm/kernel/sched.c	2004-12-03 20:56:31.454978304 -0800
@@ -601,6 +601,15 @@ static void enqueue_task(struct task_str
 }
 
 /*
+ * Put a task at the end of the run list without the overhead of a
+ * dequeue followed by an enqueue.
+ */
+static void requeue_task(struct task_struct *p, prio_array_t *array)
+{
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+/*
  * Used by the migration code - we pull tasks from the head of the
  * remote queue so we want these tasks to show up at the head of the
  * local queue:
@@ -2349,8 +2358,7 @@ void scheduler_tick(int user_ticks, int 
 			set_tsk_need_resched(p);
 
 			/* put it at the end of the queue: */
-			dequeue_task(p, rq->active);
-			enqueue_task(p, rq->active);
+			requeue_task(p, rq->active);
 		}
 		goto out_unlock;
 	}
@@ -3449,8 +3457,14 @@ asmlinkage long sys_sched_yield(void)
 	} else if (!rq->expired->nr_active)
 		schedstat_inc(rq, yld_exp_empty);
 
-	dequeue_task(current, array);
-	enqueue_task(current, target);
+	if (array != target) {
+		dequeue_task(current, array);
+		enqueue_task(current, target);
+	} else
+		/*
+		 * requeue_task() is cheaper, so use it if possible.
+		 */
+		requeue_task(current, array);
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
_