From: Rusty Russell <rusty@rustcorp.com.au>

This converts the migration threads, the workqueue threads and the
ksoftirqd threads to the kthread primitives, replacing the open-coded
kernel_thread()/daemonize() startup handshakes with kthread_create(),
kthread_bind() and kthread_stop().
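
For reference, a minimal sketch of the pattern all three conversions
share: create the thread stopped, bind it to its CPU, wake it, and
tear it down later with a single kthread_stop().  The module wrapper,
the "demo" names and the kthread_should_stop() exit test are
illustrative assumptions (the threads in this patch still exit on
signal_pending(current)); only the kthread_create()/kthread_bind()/
wake_up_process()/kthread_stop() sequence is taken from the patch:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	/* Loop until someone calls kthread_stop() on this thread. */
	while (!kthread_should_stop()) {
		/* ... per-iteration work goes here ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static int __init demo_init(void)
{
	/* The create/bind/wake sequence the patch uses for the
	 * migration, ksoftirqd and workqueue threads. */
	demo_task = kthread_create(demo_thread, NULL, "demo/%d", 0);
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	kthread_bind(demo_task, 0);	/* pin to CPU 0 */
	wake_up_process(demo_task);	/* kthreads start stopped */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* wake it and wait for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");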

---

 kernel/sched.c     |   60 +++++++++++++++--------------------------------------
 kernel/softirq.c   |   22 ++++++++-----------
 kernel/workqueue.c |   60 ++++++++++++++++++-----------------------------------
 3 files changed, 49 insertions(+), 93 deletions(-)
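
The teardown side shrinks to match.  A sketch of the open-coded exit
handshake that the workqueue hunks below remove, next to its kthread
equivalent (fragments, not compilable on their own):

	/* Before: hand-rolled stop in cleanup_workqueue_thread() */
	cwq->thread = NULL;			/* flag the worker to exit   */
	wake_up(&cwq->more_work);		/* kick it out of schedule() */
	wait_for_completion(&cwq->exit);	/* wait until it has exited  */

	/* After: one primitive does the flag/wake/wait dance */
	kthread_stop(cwq->thread);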

diff -puN kernel/sched.c~use-kthread-primitives kernel/sched.c
--- 25/kernel/sched.c~use-kthread-primitives	2004-01-25 23:40:52.000000000 -0800
+++ 25-akpm/kernel/sched.c	2004-01-25 23:40:52.000000000 -0800
@@ -37,6 +37,7 @@
 #include <linux/rcupdate.h>
 #include <linux/cpu.h>
 #include <linux/percpu.h>
+#include <linux/kthread.h>
 
 #ifdef CONFIG_NUMA
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
@@ -3075,12 +3076,6 @@ out:
 	local_irq_restore(flags);
 }
 
-typedef struct {
-	int cpu;
-	struct completion startup_done;
-	task_t *task;
-} migration_startup_t;
-
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -3090,27 +3085,17 @@ static int migration_thread(void * data)
 {
 	/* Marking "param" __user is ok, since we do a set_fs(KERNEL_DS); */
 	struct sched_param __user param = { .sched_priority = MAX_RT_PRIO-1 };
-	migration_startup_t *startup = data;
-	int cpu = startup->cpu;
 	runqueue_t *rq;
+	int cpu = (long)data;
 	int ret;
 
-	startup->task = current;
-	complete(&startup->startup_done);
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule();
-
 	BUG_ON(smp_processor_id() != cpu);
-
-	daemonize("migration/%d", cpu);
-	set_fs(KERNEL_DS);
-
 	ret = setscheduler(0, SCHED_FIFO, &param);
 
 	rq = this_rq();
-	rq->migration_thread = current;
+	BUG_ON(rq->migration_thread != current);
 
-	for (;;) {
+	while (!signal_pending(current)) {
 		struct list_head *head;
 		migration_req_t *req;
 
@@ -3139,6 +3124,7 @@ static int migration_thread(void * data)
 			       any_online_cpu(req->task->cpus_allowed));
 		complete(&req->done);
 	}
+	return 0;
 }
 
 /*
@@ -3148,37 +3134,27 @@ static int migration_thread(void * data)
 static int migration_call(struct notifier_block *nfb, unsigned long action,
 			  void *hcpu)
 {
-	long cpu = (long)hcpu;
-	migration_startup_t startup;
+	int cpu = (long)hcpu;
+	struct task_struct *p;
 
 	switch (action) {
 	case CPU_ONLINE:
-
-		printk("Starting migration thread for cpu %li\n", cpu);
-
-		startup.cpu = cpu;
-		startup.task = NULL;
-		init_completion(&startup.startup_done);
-
-		kernel_thread(migration_thread, &startup, CLONE_KERNEL);
-		wait_for_completion(&startup.startup_done);
-		wait_task_inactive(startup.task);
-
-		startup.task->thread_info->cpu = cpu;
-		startup.task->cpus_allowed = cpumask_of_cpu(cpu);
-
-		wake_up_process(startup.task);
-
-		while (!cpu_rq(cpu)->migration_thread)
-			yield();
-
+		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
+		if (IS_ERR(p))
+			return NOTIFY_BAD;
+		kthread_bind(p, cpu);
+		cpu_rq(cpu)->migration_thread = p;
+		wake_up_process(p);
 		break;
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block migration_notifier
-			= { .notifier_call = &migration_call };
+/* Want this before the other threads, so they can use set_cpus_allowed. */
+static struct notifier_block migration_notifier = {
+	.notifier_call = &migration_call,
+	.priority = 10,
+};
 
 int __init migration_init(void)
 {
diff -puN kernel/softirq.c~use-kthread-primitives kernel/softirq.c
--- 25/kernel/softirq.c~use-kthread-primitives	2004-01-25 23:40:52.000000000 -0800
+++ 25-akpm/kernel/softirq.c	2004-01-25 23:40:52.000000000 -0800
@@ -14,6 +14,7 @@
 #include <linux/notifier.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/kthread.h>
 
 /*
    - No shared variables, all the data are CPU local.
@@ -348,20 +349,14 @@ static int ksoftirqd(void * __bind_cpu)
 {
 	int cpu = (int) (long) __bind_cpu;
 
-	daemonize("ksoftirqd/%d", cpu);
 	set_user_nice(current, 19);
 	current->flags |= PF_IOTHREAD;
 
-	/* Migrate to the right CPU */
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	BUG_ON(smp_processor_id() != cpu);
 
-	__set_current_state(TASK_INTERRUPTIBLE);
-	mb();
+	set_current_state(TASK_INTERRUPTIBLE);
 
-	__get_cpu_var(ksoftirqd) = current;
-
-	for (;;) {
+	while (!signal_pending(current)) {
 		if (!local_softirq_pending())
 			schedule();
 
@@ -374,6 +369,7 @@ static int ksoftirqd(void * __bind_cpu)
 
 		__set_current_state(TASK_INTERRUPTIBLE);
 	}
+	return 0;
 }
 
 static int __devinit cpu_callback(struct notifier_block *nfb,
@@ -381,15 +377,17 @@ static int __devinit cpu_callback(struct
 				  void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
+	struct task_struct *p;
 
 	if (action == CPU_ONLINE) {
-		if (kernel_thread(ksoftirqd, hcpu, CLONE_KERNEL) < 0) {
+		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-
-		while (!per_cpu(ksoftirqd, hotcpu))
-			yield();
+		per_cpu(ksoftirqd, hotcpu) = p;
+		kthread_bind(p, hotcpu);
+		wake_up_process(p);
  	}
 	return NOTIFY_OK;
 }
diff -puN kernel/workqueue.c~use-kthread-primitives kernel/workqueue.c
--- 25/kernel/workqueue.c~use-kthread-primitives	2004-01-25 23:40:52.000000000 -0800
+++ 25-akpm/kernel/workqueue.c	2004-01-25 23:40:52.000000000 -0800
@@ -22,6 +22,7 @@
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 
 /*
  * The per-CPU workqueue.
@@ -45,7 +46,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	task_t *thread;
-	struct completion exit;
 
 } ____cacheline_aligned;
 
@@ -153,28 +153,23 @@ static inline void run_workqueue(struct 
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
-typedef struct startup_s {
-	struct cpu_workqueue_struct *cwq;
-	struct completion done;
-	const char *name;
-} startup_t;
-
-static int worker_thread(void *__startup)
+static int worker_thread(void *__cwq)
 {
-	startup_t *startup = __startup;
-	struct cpu_workqueue_struct *cwq = startup->cwq;
+	struct cpu_workqueue_struct *cwq = __cwq;
 	int cpu = cwq - cwq->wq->cpu_wq;
 	DECLARE_WAITQUEUE(wait, current);
 	struct k_sigaction sa;
+	sigset_t blocked;
 
-	daemonize("%s/%d", startup->name, cpu);
 	current->flags |= PF_IOTHREAD;
-	cwq->thread = current;
 
 	set_user_nice(current, -10);
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	BUG_ON(smp_processor_id() != cpu);
 
-	complete(&startup->done);
+	/* Block and flush all signals */
+	sigfillset(&blocked);
+	sigprocmask(SIG_BLOCK, &blocked, NULL);
+	flush_signals(current);
 
 	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
 	sa.sa.sa_handler = SIG_IGN;
@@ -182,12 +177,10 @@ static int worker_thread(void *__startup
 	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
 	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
 
-	for (;;) {
+	while (!signal_pending(current)) {
 		set_task_state(current, TASK_INTERRUPTIBLE);
 
 		add_wait_queue(&cwq->more_work, &wait);
-		if (!cwq->thread)
-			break;
 		if (list_empty(&cwq->worklist))
 			schedule();
 		else
@@ -197,9 +190,6 @@ static int worker_thread(void *__startup
 		if (!list_empty(&cwq->worklist))
 			run_workqueue(cwq);
 	}
-	remove_wait_queue(&cwq->more_work, &wait);
-	complete(&cwq->exit);
-
 	return 0;
 }
 
@@ -251,9 +241,8 @@ static int create_workqueue_thread(struc
 				   const char *name,
 				   int cpu)
 {
-	startup_t startup;
 	struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
-	int ret;
+	struct task_struct *p;
 
 	spin_lock_init(&cwq->lock);
 	cwq->wq = wq;
@@ -263,17 +252,13 @@ static int create_workqueue_thread(struc
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
-	init_completion(&cwq->exit);
 
-	init_completion(&startup.done);
-	startup.cwq = cwq;
-	startup.name = name;
-	ret = kernel_thread(worker_thread, &startup, CLONE_FS | CLONE_FILES);
-	if (ret >= 0) {
-		wait_for_completion(&startup.done);
-		BUG_ON(!cwq->thread);
-	}
-	return ret;
+	p = kthread_create(worker_thread, cwq, "%s/%d", name, cpu);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+	cwq->thread = p;
+	kthread_bind(p, cpu);
+	return 0;
 }
 
 struct workqueue_struct *create_workqueue(const char *name)
@@ -292,6 +277,8 @@ struct workqueue_struct *create_workqueu
 			continue;
 		if (create_workqueue_thread(wq, name, cpu) < 0)
 			destroy = 1;
+		else
+			wake_up_process(wq->cpu_wq[cpu].thread);
 	}
 	/*
 	 * Was there any error during startup? If yes then clean up:
@@ -308,13 +295,8 @@ static void cleanup_workqueue_thread(str
 	struct cpu_workqueue_struct *cwq;
 
 	cwq = wq->cpu_wq + cpu;
-	if (cwq->thread) {
-		/* Tell thread to exit and wait for it. */
-		cwq->thread = NULL;
-		wake_up(&cwq->more_work);
-
-		wait_for_completion(&cwq->exit);
-	}
+	if (cwq->thread)
+		kthread_stop(cwq->thread);
 }
 
 void destroy_workqueue(struct workqueue_struct *wq)
