From: Mikael Pettersson <mikpe@csd.uu.se>

- Add a facility for masking perfctr interrupts.  To reduce overhead,
  this is done in software via a per-cpu mask instead of writing to the
  local APIC.

- Mask interrupts when interrupt-mode counters are suspended, and unmask
  them when the counters are resumed.  This prevents delayed interrupts
  (due to a HW quirk) from being delivered to the wrong tasks.

- The suspend path records whether any interrupt-mode counters are in
  overflow state.  This informs the higher levels that a pending
  interrupt (now masked) must be simulated; see the sketch below.

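For reference, a condensed sketch of the mechanism, pieced together from
the hunks below (names match the patch; the loop bounds and counter reads
are abbreviated).  Interrupt-mode counters count up from a negative start
value, so a counter whose sign bit has cleared has overflowed, which is
what the (int)now >= 0 test detects:

	/* Per-cpu software mask: testing a cached flag on each interrupt
	 * is cheaper than reprogramming LVTPC in the local APIC on every
	 * suspend/resume. */
	struct per_cpu_cache {
		/* ... */
		unsigned int interrupts_masked;
	} ____cacheline_aligned;

	/* Interrupt entry: always ACK the APIC, but drop the interrupt if
	 * this CPU's counters are suspended, so a delayed interrupt cannot
	 * be delivered to the wrong task. */
	asmlinkage void smp_perfctr_interrupt(struct pt_regs *regs)
	{
		ack_APIC_irq();
		if (get_cpu_cache()->interrupts_masked)
			return;
		irq_enter();
		(*perfctr_ihandler)(instruction_pointer(regs));
		irq_exit();
	}

	/* Suspend path: mask first, then record how many i-mode counters
	 * have overflowed; the higher levels use state->pending_interrupt
	 * to simulate the (now masked) overflow interrupts. */
	perfctr_cpu_mask_interrupts(cache);
	for (i = nractrs; i < nrctrs; ++i) {
		/* ... rdpmc counter i into 'now' ... */
		if ((int)now >= 0)	/* sign bit clear => overflowed */
			++pending;
	}
	state->pending_interrupt = pending;
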
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/drivers/perfctr/x86.c      |   22 ++++++++++++++++++++++
 25-akpm/include/asm-i386/perfctr.h |    3 +++
 2 files changed, 25 insertions(+)

diff -puN drivers/perfctr/x86.c~perfctr-x86-driver-updates drivers/perfctr/x86.c
--- 25/drivers/perfctr/x86.c~perfctr-x86-driver-updates	2004-11-11 01:44:20.097389680 -0800
+++ 25-akpm/drivers/perfctr/x86.c	2004-11-11 01:44:20.103388768 -0800
@@ -34,6 +34,9 @@ struct per_cpu_cache {	/* roughly a subs
 		unsigned int pebs_enable;
 		unsigned int pebs_matrix_vert;
 	} control;
+#ifdef CONFIG_PERFCTR_INTERRUPT_SUPPORT
+	unsigned int interrupts_masked;
+#endif
 } ____cacheline_aligned;
 static DEFINE_PER_CPU(struct per_cpu_cache, per_cpu_cache);
 #define __get_cpu_cache(cpu) (&per_cpu(per_cpu_cache, cpu))
@@ -155,6 +158,8 @@ asmlinkage void smp_perfctr_interrupt(st
 	   masks interrupts. We're still on the originating CPU. */
 	/* XXX: recursive interrupts? delay the ACK, mask LVTPC, or queue? */
 	ack_APIC_irq();
+	if (get_cpu_cache()->interrupts_masked)
+		return;
 	irq_enter();
 	(*perfctr_ihandler)(instruction_pointer(regs));
 	irq_exit();
@@ -165,6 +170,16 @@ void perfctr_cpu_set_ihandler(perfctr_ih
 	perfctr_ihandler = ihandler ? ihandler : perfctr_default_ihandler;
 }
 
+static inline void perfctr_cpu_mask_interrupts(struct per_cpu_cache *cache)
+{
+	cache->interrupts_masked = 1;
+}
+
+static inline void perfctr_cpu_unmask_interrupts(struct per_cpu_cache *cache)
+{
+	cache->interrupts_masked = 0;
+}
+
 #else
 #define perfctr_cstatus_has_ictrs(cstatus)	0
 #undef cpu_has_apic
@@ -452,10 +467,12 @@ static void p6_like_isuspend(struct perf
 	struct per_cpu_cache *cache;
 	unsigned int cstatus, nrctrs, i;
 	int cpu;
+	unsigned int pending = 0;
 
 	cpu = smp_processor_id();
 	set_isuspend_cpu(state, cpu); /* early to limit cpu's live range */
 	cache = __get_cpu_cache(cpu);
+	perfctr_cpu_mask_interrupts(cache);
 	cstatus = state->cstatus;
 	nrctrs = perfctr_cstatus_nrctrs(cstatus);
 	for(i = perfctr_cstatus_nractrs(cstatus); i < nrctrs; ++i) {
@@ -471,7 +488,10 @@ static void p6_like_isuspend(struct perf
 		rdpmc_low(pmc_raw, now);
 		state->pmc[i].sum += now - state->pmc[i].start;
 		state->pmc[i].start = now;
+		if ((int)now >= 0)
+			++pending;
 	}
+	state->pending_interrupt = pending;
 	/* cache->k1.id is still == state->k1.id */
 }
 
@@ -487,6 +507,7 @@ static void p6_like_iresume(const struct
 
 	cpu = smp_processor_id();
 	cache = __get_cpu_cache(cpu);
+	perfctr_cpu_unmask_interrupts(cache);
 	if (cache->k1.id == state->k1.id) {
 		cache->k1.id = 0; /* force reload of cleared EVNTSELs */
 		if (is_isuspend_cpu(state, cpu))
@@ -976,6 +997,7 @@ unsigned int perfctr_cpu_identify_overfl
 	pmc = perfctr_cstatus_nractrs(cstatus);
 	nrctrs = perfctr_cstatus_nrctrs(cstatus);
 
+	state->pending_interrupt = 0;
 	for(pmc_mask = 0; pmc < nrctrs; ++pmc) {
 		if ((int)state->pmc[pmc].start >= 0) { /* XXX: ">" ? */
 			/* XXX: "+=" to correct for overshots */
diff -puN include/asm-i386/perfctr.h~perfctr-x86-driver-updates include/asm-i386/perfctr.h
--- 25/include/asm-i386/perfctr.h~perfctr-x86-driver-updates	2004-11-11 01:44:20.098389528 -0800
+++ 25-akpm/include/asm-i386/perfctr.h	2004-11-11 01:44:20.104388616 -0800
@@ -66,6 +66,9 @@ struct perfctr_cpu_state {
 #ifdef __KERNEL__
 	struct perfctr_cpu_control control;
 	unsigned int p4_escr_map[18];
+#ifdef CONFIG_PERFCTR_INTERRUPT_SUPPORT
+	unsigned int pending_interrupt;
+#endif
 #endif
 };
 
_