From: Zwane Mwaikambo <zwane@arm.linux.org.uk>

Via reading the code, my understanding is that powernow-k8 uses
preempt_disable to ensure that the task executing driver->target doesn't
migrate across cpus whilst it's accessing per processor registers; however
set_cpus_allowed will provide this for us.  Additionally, remove the
schedule() calls that follow set_cpus_allowed, as set_cpus_allowed ensures
that you're executing on the target processor on return.

Signed-off-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 arch/i386/kernel/cpu/cpufreq/powernow-k8.c |   10 ----------
 1 files changed, 10 deletions(-)

diff -puN arch/i386/kernel/cpu/cpufreq/powernow-k8.c~remove-preempt_disable-from-powernow-k8 arch/i386/kernel/cpu/cpufreq/powernow-k8.c
--- 25/arch/i386/kernel/cpu/cpufreq/powernow-k8.c~remove-preempt_disable-from-powernow-k8	Mon Jul 11 15:54:40 2005
+++ 25-akpm/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	Mon Jul 11 15:54:40 2005
@@ -453,7 +453,6 @@ static int check_supported_cpu(unsigned 
 
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	schedule();
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
@@ -488,7 +487,6 @@ static int check_supported_cpu(unsigned 
 
 out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	return rc;
 
 }
@@ -895,7 +893,6 @@ static int powernowk8_target(struct cpuf
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -959,8 +956,6 @@ static int powernowk8_target(struct cpuf
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
-
 	return ret;
 }
 
@@ -1017,7 +1012,6 @@ static int __init powernowk8_cpu_init(st
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -1036,7 +1030,6 @@ static int __init powernowk8_cpu_init(st
 
 	/* run on any CPU again */
 	set_cpus_allowed(current, oldmask);
-	schedule();
 
 	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	pol->cpus = cpu_core_map[pol->cpu];
@@ -1069,7 +1062,6 @@ static int __init powernowk8_cpu_init(st
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	powernow_k8_cpu_exit_acpi(data);
 
 	kfree(data);
@@ -1105,7 +1097,6 @@ static unsigned int powernowk8_get (unsi
 		set_cpus_allowed(current, oldmask);
 		return 0;
 	}
-	preempt_disable();
 	
 	if (query_current_values_with_pending_wait(data))
 		goto out;
@@ -1113,7 +1104,6 @@ static unsigned int powernowk8_get (unsi
 	khz = find_khz_freq_from_fid(data->currfid);
 
  out:
-	preempt_enable_no_resched();
 	set_cpus_allowed(current, oldmask);
 
 	return khz;
_