I finally have something that works rock solid here:
andrea@alpha:~ > cat /proc/interrupts
CPU0 CPU1 TRY0 TRY1
0: 1170513 0 1170513 0 XT-PIC +timer
1: 8579 0 8579 0 XT-PIC keyboard
2: 0 0 0 0 XT-PIC cascade
4: 183 0 183 0 XT-PIC serial
8: 1144157 0 1170362 0 RTC +rtc
12: 5755 0 5755 0 XT-PIC PS/2 Mouse
27: 2391048 1782527 4107496 4159601 TSUNAMI eth0
46: 24952 24510 48246 48888 TSUNAMI sym53c8xx
47: 31 6 37 37 TSUNAMI sym53c8xx
LOC: 1170363 1174876
For now only the dp264 and the sx164 platforms have been updated to use the
new SMP-safe irq code, so the kernel doesn't compile with ALPHA_GENERIC;
it compiles only when configured for DP264 or SX164.
Updating all the other archs can be done without any problems at any time.
All ISA irqs are still broadcast to the first CPU because ISA irqs have
no latency issues at all. The only irq that we may want to let scale
across all CPUs is the timer irq, but it's almost impossible to make it
scale without risking overestimating the number of ticks and without
hardware support to distribute the irq at the hardware level. If somebody
has a clever idea it's welcome of course :).
This is the strict patch (note: I am not proposing it for inclusion yet):
diff -urN 2.3.35pre6/arch/alpha/kernel/Makefile a/arch/alpha/kernel/Makefile
--- 2.3.35pre6/arch/alpha/kernel/Makefile Wed Dec 8 00:05:25 1999
+++ a/arch/alpha/kernel/Makefile Thu Dec 30 16:18:28 1999
@@ -14,7 +14,7 @@
O_TARGET := kernel.o
O_OBJS := entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \
- ptrace.o time.o semaphore.o
+ ptrace.o time.o semaphore.o i8259.o rtc_irq.o
OX_OBJS := alpha_ksyms.o
diff -urN 2.3.35pre6/arch/alpha/kernel/i8259.c a/arch/alpha/kernel/i8259.c
--- 2.3.35pre6/arch/alpha/kernel/i8259.c Thu Jan 1 01:00:00 1970
+++ a/arch/alpha/kernel/i8259.c Thu Dec 30 16:18:28 1999
@@ -0,0 +1,123 @@
+/* started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c */
+
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ */
+
+static void enable_8259A_irq(unsigned int irq);
+static void disable_8259A_irq(unsigned int irq);
+
+/* shutdown is same as "disable" */
+#define end_8259A_irq enable_8259A_irq
+#define shutdown_8259A_irq disable_8259A_irq
+
+static void mask_and_ack_8259A(unsigned int);
+
+static unsigned int startup_8259A_irq(unsigned int irq)
+{
+ enable_8259A_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type i8259A_irq_type = {
+ "XT-PIC",
+ startup_8259A_irq,
+ shutdown_8259A_irq,
+ enable_8259A_irq,
+ disable_8259A_irq,
+ mask_and_ack_8259A,
+ end_8259A_irq
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+/*
+ * This contains the irq mask for both 8259A irq controllers,
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define __byte(x,y) (((unsigned char *)&(y))[x])
+#define cached_21 (__byte(0,cached_irq_mask))
+#define cached_A1 (__byte(1,cached_irq_mask))
+
+/*
+ * These have to be protected by the irq controller spinlock
+ * before being called.
+ */
+static void disable_8259A_irq(unsigned int irq)
+{
+ unsigned int mask = 1 << irq;
+ cached_irq_mask |= mask;
+ if (irq & 8)
+ outb(cached_A1,0xA1);
+ else
+ outb(cached_21,0x21);
+}
+
+static void enable_8259A_irq(unsigned int irq)
+{
+ unsigned int mask = ~(1 << irq);
+ cached_irq_mask &= mask;
+ if (irq & 8)
+ outb(cached_A1,0xA1);
+ else
+ outb(cached_21,0x21);
+}
+
+static void mask_and_ack_8259A(unsigned int irq)
+{
+ disable_8259A_irq(irq);
+
+ /* Ack the interrupt making it the lowest priority */
+ /* First the slave .. */
+ if (irq > 7) {
+ outb(0xE0 | (irq - 8), 0xa0);
+ irq = 2;
+ }
+ /* .. then the master */
+ outb(0xE0 | irq, 0x20);
+}
+
+static void init_8259A(void)
+{
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+ outb(0xff, 0xA1); /* mask all of 8259A-2 */
+}
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
+
+void __init
+init_ISA_irqs (void)
+{
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (i == RTC_IRQ)
+ continue;
+ if (i >= 16)
+ break;
+ irq_desc[i].status = IRQ_DISABLED;
+ /*
+ * 16 old-style INTA-cycle interrupts:
+ */
+ irq_desc[i].handler = &i8259A_irq_type;
+ }
+
+ init_8259A();
+ setup_irq(2, &irq2);
+}
diff -urN 2.3.35pre6/arch/alpha/kernel/irq.c a/arch/alpha/kernel/irq.c
--- 2.3.35pre6/arch/alpha/kernel/irq.c Thu Dec 9 02:27:27 1999
+++ a/arch/alpha/kernel/irq.c Fri Dec 31 15:31:42 1999
@@ -39,6 +39,7 @@
#ifndef __SMP__
int __local_irq_count;
int __local_bh_count;
+unsigned long __irq_attempt[NR_IRQS];
#endif
#if NR_IRQS > 128
@@ -57,12 +58,6 @@
/*
- * Shadow-copy of masked interrupts.
- */
-
-unsigned long _alpha_irq_masks[2] = { ~0UL, ~0UL };
-
-/*
* The ack_irq routine used by 80% of the systems.
*/
@@ -135,7 +130,7 @@
return;
}
}
- handle_irq(j, j, regs);
+ handle_irq(j, regs);
#else
unsigned long pic;
@@ -169,77 +164,201 @@
void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
- handle_irq(irq, ack, regs);
+ irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
}
/*
+ * Special irq handlers.
+ */
+
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+/*
* Initial irq handlers.
*/
-static struct irqaction timer_irq = { NULL, 0, 0, NULL, NULL, NULL};
-spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = {0,} };
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
-static inline void
-mask_irq(unsigned long irq)
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction *action)
{
- set_bit(irq, _alpha_irq_masks);
- alpha_mv.update_irq_hw(irq, alpha_irq_mask, 0);
-}
+ int status;
+ int cpu = smp_processor_id();
-static inline void
-unmask_irq(unsigned long irq)
-{
- clear_bit(irq, _alpha_irq_masks);
- alpha_mv.update_irq_hw(irq, alpha_irq_mask, 1);
+ kstat.irqs[cpu][irq]++;
+ irq_enter(cpu, irq);
+
+ status = 1; /* Force the "do bottom halves" bit */
+
+ do {
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+ else
+ __cli();
+
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+
+ irq_exit(cpu, irq);
+
+ return status;
}
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
void
-disable_irq_nosync(unsigned int irq_nr)
+disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
- save_and_cli(flags);
- mask_irq(irq_nr);
- restore_flags(flags);
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (!irq_desc[irq].depth++) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
void
-disable_irq(unsigned int irq_nr)
+disable_irq(unsigned int irq)
{
- /* This works non-SMP, and SMP until we write code to distribute
- interrupts to more that cpu 0. */
- disable_irq_nosync(irq_nr);
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count(smp_processor_id())) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
}
void
-enable_irq(unsigned int irq_nr)
+enable_irq(unsigned int irq)
{
unsigned long flags;
- save_and_cli(flags);
- unmask_irq(irq_nr);
- restore_flags(flags);
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ switch (irq_desc[irq].depth) {
+ case 1: {
+ unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
+ irq_desc[irq].status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ irq_desc[irq].status = status | IRQ_REPLAY;
+ hw_resend_irq(irq_desc[irq].handler,irq); /* noop */
+ }
+ irq_desc[irq].handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ irq_desc[irq].depth--;
+ break;
+ case 0:
+ printk("enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int
-check_irq(unsigned int irq)
+setup_irq(unsigned int irq, struct irqaction * new)
{
- return irq_desc[irq].action ? -EBUSY : 0;
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler, but is this really a problem,
+ * only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ irq_desc[irq].depth = 0;
+ irq_desc[irq].status &= ~IRQ_DISABLED;
+ irq_desc[irq].handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return 0;
}
int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
- int shared = 0;
- struct irqaction * action, **p;
- unsigned long flags;
+ int retval;
+ struct irqaction * action;
if (irq >= ACTUAL_NR_IRQS)
return -EINVAL;
@@ -248,36 +367,25 @@
if (!handler)
return -EINVAL;
- p = &irq_desc[irq].action;
- action = *p;
- if (action) {
- /* Can't share interrupts unless both agree to */
- if (!(action->flags & irqflags & SA_SHIRQ))
- return -EBUSY;
-
- /* Can't share interrupts unless both are same type */
- if ((action->flags ^ irqflags) & SA_INTERRUPT)
- return -EBUSY;
-
- /* Add new interrupt at end of irq queue */
- do {
- p = &action->next;
- action = *p;
- } while (action);
- shared = 1;
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if (irqflags & SA_SHIRQ) {
+ if (!dev_id)
+ printk("Bad boy: %s (at %p) called us without a dev_id!\n",
+ devname, __builtin_return_address(0));
}
+#endif
- action = &timer_irq;
- if (irq != TIMER_IRQ) {
- action = (struct irqaction *)
+ action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- }
if (!action)
return -ENOMEM;
- if (irqflags & SA_SAMPLE_RANDOM)
- rand_initialize_irq(irq);
-
action->handler = handler;
action->flags = irqflags;
action->mask = 0;
@@ -285,20 +393,16 @@
action->next = NULL;
action->dev_id = dev_id;
- save_and_cli(flags);
- *p = action;
-
- if (!shared)
- unmask_irq(irq);
-
- restore_flags(flags);
- return 0;
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
}
-
+
void
free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action, **p;
+ struct irqaction **p;
unsigned long flags;
if (irq >= ACTUAL_NR_IRQS) {
@@ -309,25 +413,39 @@
printk("Trying to free reserved IRQ %d\n", irq);
return;
}
- for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
- if (action->dev_id != dev_id)
- continue;
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
- /* Found it - now free it */
- save_and_cli(flags);
- *p = action->next;
- if (!irq_desc[irq].action)
- mask_irq(irq);
- restore_flags(flags);
- kfree(action);
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!irq_desc[irq].action) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ while (irq_desc[irq].status & IRQ_INPROGRESS)
+ barrier();
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
return;
}
- printk("Trying to free free IRQ%d\n",irq);
}
int get_irq_list(char *buf)
{
- int i;
+ int i, j;
struct irqaction * action;
char *p = buf;
@@ -335,6 +453,8 @@
p += sprintf(p, " ");
for (i = 0; i < smp_num_cpus; i++)
p += sprintf(p, "CPU%d ", i);
+ for (i = 0; i < smp_num_cpus; i++)
+ p += sprintf(p, "TRY%d ", i);
*p++ = '\n';
#endif
@@ -346,13 +466,14 @@
#ifndef __SMP__
p += sprintf(p, "%10u ", kstat_irqs(i));
#else
- {
- int j;
- for (j = 0; j < smp_num_cpus; j++)
- p += sprintf(p, "%10u ",
- kstat.irqs[cpu_logical_map(j)][i]);
- }
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10lu ",
+ irq_attempt(cpu_logical_map(j), i));
#endif
+ p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %c%s",
(action->flags & SA_INTERRUPT)?'+':' ',
action->name);
@@ -364,6 +485,13 @@
}
*p++ = '\n';
}
+#if CONFIG_SMP
+ p += sprintf(p, "LOC: ");
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10lu ",
+ cpu_data[cpu_logical_map(j)].smp_local_irq_count);
+ p += sprintf(p, "\n");
+#endif
return p - buf;
}
@@ -638,139 +766,157 @@
}
#endif /* __SMP__ */
-static void
-unexpected_irq(int irq, struct pt_regs * regs)
-{
-#if 0
-#if 1
- printk("device_interrupt: unexpected interrupt %d\n", irq);
-#else
- struct irqaction *action;
- int i;
-
- printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
- printk("Expecting: ");
- for (i = 0; i < ACTUAL_NR_IRQS; i++)
- if ((action = irq_desc[i].action))
- while (action->handler) {
- printk("[%s:%d] ", action->name, i);
- action = action->next;
- }
- printk("\n");
-#endif
-#endif
-
-#if defined(CONFIG_ALPHA_JENSEN)
- /* ??? Is all this just debugging, or are the inb's and outb's
- necessary to make things work? */
- printk("64=%02x, 60=%02x, 3fa=%02x 2fa=%02x\n",
- inb(0x64), inb(0x60), inb(0x3fa), inb(0x2fa));
- outb(0x0c, 0x3fc);
- outb(0x0c, 0x2fc);
- outb(0,0x61);
- outb(0,0x461);
-#endif
-}
-
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
void
-handle_irq(int irq, int ack, struct pt_regs * regs)
-{
- struct irqaction * action;
+handle_irq(int irq, struct pt_regs * regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
int cpu = smp_processor_id();
+ irq_desc_t *desc;
+ struct irqaction * action;
+ unsigned int status;
if ((unsigned) irq > ACTUAL_NR_IRQS) {
printk("device_interrupt: illegal interrupt %d\n", irq);
return;
}
-#if 0
- /* A useful bit of code to find out if an interrupt is going wild. */
- {
- static unsigned int last_msg, last_cc;
- static int last_irq, count;
- unsigned int cc;
-
- __asm __volatile("rpcc %0" : "=r"(cc));
- ++count;
- if (cc - last_msg > 150000000 || irq != last_irq) {
- printk("handle_irq: irq %d count %d cc %u @ %p\n",
- irq, count, cc-last_cc, regs->pc);
- count = 0;
- last_msg = cc;
- last_irq = irq;
- }
- last_cc = cc;
+ irq_attempt(cpu, irq)++;
+ desc = irq_desc + irq;
+ spin_lock_irq(&irq_controller_lock); /* mask also the RTC */
+ desc->handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
}
-#endif
+ desc->status = status;
+ spin_unlock(&irq_controller_lock);
- irq_enter(cpu, irq);
- kstat.irqs[cpu][irq] += 1;
- action = irq_desc[irq].action;
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (!action)
+ return;
/*
- * For normal interrupts, we mask it out, and then ACK it.
- * This way another (more timing-critical) interrupt can
- * come through while we're doing this one.
- *
- * Note! An irq without a handler gets masked and acked, but
- * never unmasked. The autoirq stuff depends on this (it looks
- * at the masks before and after doing the probing).
- */
- if (ack >= 0) {
- mask_irq(ack);
- alpha_mv.ack_irq(ack);
- }
- if (action) {
- if (action->flags & SA_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
- do {
- action->handler(irq, action->dev_id, regs);
- action = action->next;
- } while (action);
- if (ack >= 0)
- unmask_irq(ack);
- } else {
- unexpected_irq(irq, regs);
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&irq_controller_lock);
+
+ if (!(desc->status & IRQ_PENDING)
+ || (desc->status & IRQ_LEVEL))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&irq_controller_lock);
}
- irq_exit(cpu, irq);
+ desc->status &= ~IRQ_INPROGRESS;
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->end(irq);
+ spin_unlock(&irq_controller_lock);
}
-
/*
- * Start listening for interrupts..
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
*/
-
unsigned long
probe_irq_on(void)
{
- struct irqaction * action;
- unsigned long irqs = 0;
- unsigned long delay;
unsigned int i;
+ unsigned long delay;
- /* Handle only the first 64 IRQs here. This is enough for
- [E]ISA, which is the only thing that needs probing anyway. */
- for (i = (ACTUAL_NR_IRQS - 1) & 63; i > 0; i--) {
- if (!(PROBE_MASK & (1UL << i))) {
- continue;
- }
- action = irq_desc[i].action;
- if (!action) {
- enable_irq(i);
- irqs |= (1UL << i);
+ /* Something may have generated an irq long ago and we want to
+ flush such a longstanding irq before considering it as spurious. */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--)
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&irq_controller_lock);
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ synchronize_irq();
+
+ /* enable any unassigned irqs (we must startup again here because
+ if a longstanding irq happened in the previous stage, it may have
+ masked itself) first, enable any unassigned irqs. */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--) {
+ if (!irq_desc[i].action) {
+ irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if(irq_desc[i].handler->startup(i))
+ irq_desc[i].status |= IRQ_PENDING;
}
}
+ spin_unlock_irq(&irq_controller_lock);
/*
- * Wait about 100ms for spurious interrupts to mask themselves
- * out again...
+ * Wait for spurious interrupts to trigger
*/
- for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
- barrier();
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ synchronize_irq();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
- /* Now filter out any obviously spurious interrupts. */
- return irqs & ~alpha_irq_mask;
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ return 0x12345678;
}
/*
@@ -780,19 +926,35 @@
*/
int
-probe_irq_off(unsigned long irqs)
+probe_irq_off(unsigned long unused)
{
- int i;
-
- /* Handle only the first 64 IRQs here. This is enough for
- [E]ISA, which is the only thing that needs probing anyway. */
- irqs &= alpha_irq_mask;
- if (!irqs)
- return 0;
- i = ffz(~irqs);
- if (irqs != (1UL << i))
- i = -i;
- return i;
+ int i, irq_found, nr_irqs;
+
+ if (unused != 0x12345678)
+ printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
+
+ nr_irqs = 0;
+ irq_found = 0;
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
}
@@ -815,7 +977,12 @@
#endif
break;
case 1:
- handle_irq(RTC_IRQ, -1, ®s);
+#ifdef __SMP__
+ cpu_data[smp_processor_id()].smp_local_irq_count++;
+ smp_percpu_timer_interrupt(®s);
+ if (smp_processor_id() == smp_boot_cpuid)
+#endif
+ handle_irq(RTC_IRQ, ®s);
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, ®s);
diff -urN 2.3.35pre6/arch/alpha/kernel/rtc_irq.c a/arch/alpha/kernel/rtc_irq.c
--- 2.3.35pre6/arch/alpha/kernel/rtc_irq.c Thu Jan 1 01:00:00 1970
+++ a/arch/alpha/kernel/rtc_irq.c Thu Dec 30 16:18:28 1999
@@ -0,0 +1,26 @@
+/* RTC irq callbacks, 1999 Andrea Arcangeli <[EMAIL PROTECTED]> */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+static void enable_rtc(unsigned int irq) { }
+static unsigned int startup_rtc(unsigned int irq) { return 0; }
+#define shutdown_rtc enable_rtc
+#define end_rtc enable_rtc
+#define ack_rtc enable_rtc
+#define disable_rtc enable_rtc
+
+void __init
+init_RTC_irq(void)
+{
+ static struct hw_interrupt_type rtc_irq_type = { "RTC",
+ startup_rtc,
+ shutdown_rtc,
+ enable_rtc,
+ disable_rtc,
+ ack_rtc,
+ end_rtc };
+ irq_desc[RTC_IRQ].status = IRQ_DISABLED;
+ irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+}
diff -urN 2.3.35pre6/arch/alpha/kernel/smp.c a/arch/alpha/kernel/smp.c
--- 2.3.35pre6/arch/alpha/kernel/smp.c Wed Dec 8 00:05:25 1999
+++ a/arch/alpha/kernel/smp.c Thu Dec 30 17:14:59 1999
@@ -62,6 +62,7 @@
static unsigned long smp_secondary_alive;
unsigned long cpu_present_mask; /* Which cpus ids came online. */
+static unsigned long __cpu_present_mask __initdata = 0; /* cpu reported in the hwrpb */
static int max_cpus = -1; /* Command-line limitation. */
int smp_boot_cpuid; /* Which processor we booted from. */
@@ -506,7 +507,7 @@
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
- cpu_present_mask |= (1L << i);
+ __cpu_present_mask |= (1L << i);
cpu->pal_revision = boot_cpu_palrev;
}
@@ -517,11 +518,12 @@
}
} else {
smp_num_probed = 1;
- cpu_present_mask = (1L << smp_boot_cpuid);
+ __cpu_present_mask = (1L << smp_boot_cpuid);
}
+ cpu_present_mask = 1L << smp_boot_cpuid;
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
- smp_num_probed, cpu_present_mask);
+ smp_num_probed, __cpu_present_mask);
}
/*
@@ -565,13 +567,14 @@
if (i == smp_boot_cpuid)
continue;
- if (((cpu_present_mask >> i) & 1) == 0)
+ if (((__cpu_present_mask >> i) & 1) == 0)
continue;
if (smp_boot_one_cpu(i, cpu_count))
continue;
cpu_count++;
+ cpu_present_mask |= 1L << i;
}
if (cpu_count == 1) {
diff -urN 2.3.35pre6/arch/alpha/kernel/sys_dp264.c a/arch/alpha/kernel/sys_dp264.c
--- 2.3.35pre6/arch/alpha/kernel/sys_dp264.c Wed Dec 8 00:05:25 1999
+++ a/arch/alpha/kernel/sys_dp264.c Fri Dec 31 15:25:25 1999
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -36,60 +37,157 @@
* HACK ALERT! only the boot cpu is used for interrupts.
*/
+static void enable_tsunami_irq(unsigned int irq);
+static void disable_tsunami_irq(unsigned int irq);
+static void enable_clipper_irq(unsigned int irq);
+static void disable_clipper_irq(unsigned int irq);
+
+#define end_tsunami_irq enable_tsunami_irq
+#define shutdown_tsunami_irq disable_tsunami_irq
+#define mask_and_ack_tsunami_irq disable_tsunami_irq
+
+#define end_clipper_irq enable_clipper_irq
+#define shutdown_clipper_irq disable_clipper_irq
+#define mask_and_ack_clipper_irq disable_clipper_irq
+
+
+static unsigned int
+startup_tsunami_irq(unsigned int irq)
+{
+ enable_tsunami_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static unsigned int
+startup_clipper_irq(unsigned int irq)
+{
+ enable_clipper_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type tsunami_irq_type = {
+ "TSUNAMI",
+ startup_tsunami_irq,
+ shutdown_tsunami_irq,
+ enable_tsunami_irq,
+ disable_tsunami_irq,
+ mask_and_ack_tsunami_irq,
+ end_tsunami_irq
+};
+
+static struct hw_interrupt_type clipper_irq_type = {
+ "CLIPPER",
+ startup_clipper_irq,
+ shutdown_clipper_irq,
+ enable_clipper_irq,
+ disable_clipper_irq,
+ mask_and_ack_clipper_irq,
+ end_clipper_irq
+};
+
+static unsigned long cached_irq_mask = ~0UL;
+
+#define TSUNAMI_SET_IRQ_MASK(cpu, value) \
+do { \
+ volatile unsigned long *csr; \
+ \
+ csr = &TSUNAMI_cchip->dim##cpu##.csr; \
+ *csr = (value); \
+ mb(); \
+ *csr; \
+} while(0)
+
+static inline void
+do_flush_irq_mask(unsigned long value)
+{
+ switch (TSUNAMI_bootcpu)
+ {
+ case 0:
+ TSUNAMI_SET_IRQ_MASK(0, value);
+ break;
+ case 1:
+ TSUNAMI_SET_IRQ_MASK(1, value);
+ break;
+ case 2:
+ TSUNAMI_SET_IRQ_MASK(2, value);
+ break;
+ case 3:
+ TSUNAMI_SET_IRQ_MASK(3, value);
+ break;
+ }
+}
+
+#ifdef CONFIG_SMP
+do_flush_smp_irq_mask(unsigned long value)
+{
+ extern unsigned long cpu_present_mask;
+ unsigned long other_cpus = cpu_present_mask & ~(1L << TSUNAMI_bootcpu);
+
+ if (other_cpus & 1)
+ TSUNAMI_SET_IRQ_MASK(0, value);
+ if (other_cpus & 2)
+ TSUNAMI_SET_IRQ_MASK(1, value);
+ if (other_cpus & 4)
+ TSUNAMI_SET_IRQ_MASK(2, value);
+ if (other_cpus & 8)
+ TSUNAMI_SET_IRQ_MASK(3, value);
+}
+#endif
+
static void
-dp264_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+dp264_flush_irq_mask(unsigned long mask)
{
- volatile unsigned long *csr;
+ unsigned long value;
- if (TSUNAMI_bootcpu < 2) {
- if (!TSUNAMI_bootcpu)
- csr = &TSUNAMI_cchip->dim0.csr;
- else
- csr = &TSUNAMI_cchip->dim1.csr;
- } else {
- if (TSUNAMI_bootcpu == 2)
- csr = &TSUNAMI_cchip->dim2.csr;
- else
- csr = &TSUNAMI_cchip->dim3.csr;
- }
+ value = ~mask | (1UL << 55) | 0xffff; /* isa irqs always enabled */
+ do_flush_irq_mask(value);
- *csr = ~mask;
- mb();
- *csr;
-
- if (irq < 16) {
- if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
- }
+#ifdef CONFIG_SMP
+ value = ~mask;
+ do_flush_smp_irq_mask(value);
+#endif
}
static void
-clipper_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+enable_tsunami_irq(unsigned int irq)
{
- if (irq >= 16) {
- volatile unsigned long *csr;
+ cached_irq_mask &= ~(1UL << irq);
+ dp264_flush_irq_mask(cached_irq_mask);
+}
- if (TSUNAMI_bootcpu < 2)
- if (!TSUNAMI_bootcpu)
- csr = &TSUNAMI_cchip->dim0.csr;
- else
- csr = &TSUNAMI_cchip->dim1.csr;
- else
- if (TSUNAMI_bootcpu == 2)
- csr = &TSUNAMI_cchip->dim2.csr;
- else
- csr = &TSUNAMI_cchip->dim3.csr;
-
- *csr = (~mask >> 16) | (1UL << 55); /* master ISA enable */
- mb();
- *csr;
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+static void
+disable_tsunami_irq(unsigned int irq)
+{
+ cached_irq_mask |= 1UL << irq;
+ dp264_flush_irq_mask(cached_irq_mask);
+}
+
+static void
+clipper_flush_irq_mask(unsigned long mask)
+{
+ unsigned long value;
+
+ value = (~mask >> 16) | (1UL << 55); /* master ISA enable */
+ do_flush_irq_mask(value);
+
+#ifdef CONFIG_SMP
+ value = ~mask >> 16;
+ do_flush_smp_irq_mask(value);
+#endif
+}
+
+static void
+enable_clipper_irq(unsigned int irq)
+{
+ cached_irq_mask &= ~(1UL << irq);
+ clipper_flush_irq_mask(cached_irq_mask);
+}
+
+static void
+disable_clipper_irq(unsigned int irq)
+{
+ cached_irq_mask |= 1UL << irq;
+ clipper_flush_irq_mask(cached_irq_mask);
}
static void
@@ -126,9 +224,9 @@
static void
dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
+ irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
@@ -142,17 +240,17 @@
* so we don't count them.
*/
if (irq >= 32)
- ack = irq = irq - 16;
+ irq -= 16;
- handle_irq(irq, ack, regs);
+ handle_irq(irq, regs);
}
static void
clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
+ irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
@@ -166,7 +264,22 @@
*
* Eg IRQ 24 is DRIR bit 8, etc, etc
*/
- handle_irq(irq, ack, regs);
+ handle_irq(irq, regs);
+}
+
+static void __init
+init_TSUNAMI_irqs(struct hw_interrupt_type * ops)
+{
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (i == RTC_IRQ)
+ continue;
+ if (i < 16)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = ops;
+ }
}
static void __init
@@ -180,10 +293,11 @@
if (alpha_using_srm)
alpha_mv.device_interrupt = dp264_srm_device_interrupt;
- dp264_update_irq_hw(16, alpha_irq_mask, 0);
+ init_ISA_irqs();
+ init_RTC_irq();
+ init_TSUNAMI_irqs(&tsunami_irq_type);
- enable_irq(55); /* Enable ISA interrupt controller. */
- enable_irq(2);
+ dp264_flush_irq_mask(~0UL);
}
static void __init
@@ -197,10 +311,11 @@
if (alpha_using_srm)
alpha_mv.device_interrupt = clipper_srm_device_interrupt;
- clipper_update_irq_hw(16, alpha_irq_mask, 0);
+ init_ISA_irqs();
+ init_RTC_irq();
+ init_TSUNAMI_irqs(&clipper_irq_type);
- enable_irq(55); /* Enable ISA interrupt controller. */
- enable_irq(2);
+ clipper_flush_irq_mask(~0UL);
}
@@ -431,9 +546,6 @@
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
@@ -458,9 +570,6 @@
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
@@ -484,9 +593,6 @@
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
@@ -510,9 +616,6 @@
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: clipper_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
diff -urN 2.3.35pre6/arch/alpha/kernel/sys_sx164.c a/arch/alpha/kernel/sys_sx164.c
--- 2.3.35pre6/arch/alpha/kernel/sys_sx164.c Wed Dec 8 00:05:25 1999
+++ a/arch/alpha/kernel/sys_sx164.c Thu Dec 30 16:18:28 1999
@@ -14,6 +14,8 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -26,47 +28,83 @@
#include <asm/core_pyxis.h>
#include "proto.h"
-#include <asm/hw_irq.h>
#include "pci_impl.h"
#include "machvec_impl.h"
+/* Note invert on MASK bits. */
+static unsigned long cached_irq_mask;
+
+static inline void
+sx164_change_irq_mask(unsigned long mask)
+{
+ *(vulp)PYXIS_INT_MASK = mask;
+ mb();
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static inline void
+sx164_enable_irq(unsigned int irq)
+{
+ sx164_change_irq_mask(cached_irq_mask |= 1UL << (irq - 16));
+}
+
static void
-sx164_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+sx164_disable_irq(unsigned int irq)
{
- if (irq >= 16) {
- /* Make CERTAIN none of the bogus ints get enabled */
- *(vulp)PYXIS_INT_MASK =
- ~((long)mask >> 16) & ~0x000000000000003bUL;
- mb();
- /* ... and read it back to make sure it got written. */
- *(vulp)PYXIS_INT_MASK;
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+ sx164_change_irq_mask(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static unsigned int
+sx164_startup_irq(unsigned int irq)
+{
+ sx164_enable_irq(irq);
+ return 0;
+}
+
+static inline void
+sx164_srm_enable_irq(unsigned int irq)
+{
+ cserve_ena(irq - 16);
}
static void
-sx164_srm_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+sx164_srm_disable_irq(unsigned int irq)
{
- if (irq >= 16) {
- if (unmask_p)
- cserve_ena(irq - 16);
- else
- cserve_dis(irq - 16);
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+ cserve_dis(irq - 16);
}
+static unsigned int
+sx164_srm_startup_irq(unsigned int irq)
+{
+ sx164_srm_enable_irq(irq);
+ return 0;
+}
+
+static struct hw_interrupt_type sx164_irq_type = {
+ typename: "SX164",
+ startup: sx164_startup_irq,
+ shutdown: sx164_disable_irq,
+ enable: sx164_enable_irq,
+ disable: sx164_disable_irq,
+ ack: sx164_disable_irq,
+ end: sx164_enable_irq,
+};
+
+static struct hw_interrupt_type sx164_srm_irq_type = {
+ typename: "SX164-SRM",
+ startup: sx164_srm_startup_irq,
+ shutdown: sx164_srm_disable_irq,
+ enable: sx164_srm_enable_irq,
+ disable: sx164_srm_disable_irq,
+ ack: sx164_srm_disable_irq,
+ end: sx164_srm_enable_irq,
+};
+
static void
sx164_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
- unsigned long pld, tmp;
+ unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of PYXIS */
@@ -93,35 +131,48 @@
continue;
} else {
/* if not timer int */
- handle_irq(16 + i, 16 + i, regs);
+ handle_irq(16 + i, regs);
}
- *(vulp)PYXIS_INT_REQ = 1UL << i; mb();
- tmp = *(vulp)PYXIS_INT_REQ;
+
+ *(vulp)PYXIS_INT_REQ = 1UL << i;
+ mb();
+ *(vulp)PYXIS_INT_REQ;
}
}
static void
sx164_init_irq(void)
{
+ struct hw_interrupt_type *ops;
+ long i;
+
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
+ init_ISA_irqs();
+ init_RTC_irq();
+
if (alpha_using_srm) {
- alpha_mv.update_irq_hw = sx164_srm_update_irq_hw;
alpha_mv.device_interrupt = srm_device_interrupt;
+ ops = &sx164_srm_irq_type;
}
else {
- /* Note invert on MASK bits. */
- *(vulp)PYXIS_INT_MASK = ~((long)alpha_irq_mask >> 16);
- mb();
- *(vulp)PYXIS_INT_MASK;
+ sx164_change_irq_mask(0);
+ ops = &sx164_irq_type;
+ }
+
+ for (i = 16; i < 40; ++i) {
+ /* Make CERTAIN none of the bogus ints get enabled. */
+ if ((0x3b0000 >> i) & 1)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = ops;
}
- enable_irq(16 + 6); /* enable timer */
- enable_irq(16 + 7); /* enable ISA PIC cascade */
- enable_irq(2); /* enable cascade */
+ ops->startup(16 + 6); /* enable timer */
+ ops->startup(16 + 7); /* enable ISA PIC cascade */
}
/*
@@ -202,9 +253,6 @@
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 40,
- irq_probe_mask: _PROBE_MASK(40),
- update_irq_hw: sx164_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: sx164_device_interrupt,
init_arch: pyxis_init_arch,
diff -urN 2.3.35pre6/arch/alpha/kernel/time.c a/arch/alpha/kernel/time.c
--- 2.3.35pre6/arch/alpha/kernel/time.c Wed Dec 8 00:05:25 1999
+++ a/arch/alpha/kernel/time.c Thu Dec 30 16:18:28 1999
@@ -31,6 +31,8 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -88,13 +90,7 @@
__u32 now;
long nticks;
-#ifdef __SMP__
- /* When SMP, do this for *all* CPUs, but only do the rest for
- the boot CPU. */
- smp_percpu_timer_interrupt(regs);
- if (smp_processor_id() != smp_boot_cpuid)
- return;
-#else
+#ifndef __SMP__
/* Not SMP, do kernel PC profiling here. */
if (!user_mode(regs))
alpha_do_profile(regs->pc);
@@ -248,10 +244,12 @@
void
time_init(void)
{
- void (*irq_handler)(int, void *, struct pt_regs *);
unsigned int year, mon, day, hour, min, sec, cc1, cc2;
unsigned long cycle_freq, one_percent;
long diff;
+ static struct irqaction timer_irqaction = { timer_interrupt,
+ SA_INTERRUPT, 0, "timer",
+ NULL, NULL};
/*
* The Linux interpretation of the CMOS clock register contents:
@@ -337,9 +335,7 @@
state.partial_tick = 0L;
/* setup timer */
- irq_handler = timer_interrupt;
- if (request_irq(TIMER_IRQ, irq_handler, 0, "timer", NULL))
- panic("Could not allocate timer IRQ!");
+ setup_irq(TIMER_IRQ, &timer_irqaction);
}
/*
diff -urN 2.3.35pre6/include/asm-alpha/hardirq.h a/include/asm-alpha/hardirq.h
--- 2.3.35pre6/include/asm-alpha/hardirq.h Wed Dec 29 22:55:04 1999
+++ a/include/asm-alpha/hardirq.h Fri Dec 31 02:41:30 1999
@@ -8,8 +8,11 @@
#ifndef __SMP__
extern int __local_irq_count;
#define local_irq_count(cpu) ((void)(cpu), __local_irq_count)
+extern unsigned long __irq_attempt[];
+#define irq_attempt(cpu, irq) ((void)(cpu), __irq_attempt[irq])
#else
#define local_irq_count(cpu) (cpu_data[cpu].irq_count)
+#define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq])
#endif
/*
diff -urN 2.3.35pre6/include/asm-alpha/hw_irq.h a/include/asm-alpha/hw_irq.h
--- 2.3.35pre6/include/asm-alpha/hw_irq.h Wed Dec 29 22:56:43 1999
+++ a/include/asm-alpha/hw_irq.h Fri Dec 31 02:43:11 1999
@@ -18,14 +18,11 @@
outb(0, DMA1_CLR_MASK_REG); \
outb(0, DMA2_CLR_MASK_REG)
-extern unsigned long _alpha_irq_masks[2];
-#define alpha_irq_mask _alpha_irq_masks[0]
-
extern void common_ack_irq(unsigned long irq);
extern void isa_device_interrupt(unsigned long vector, struct pt_regs * regs);
extern void srm_device_interrupt(unsigned long vector, struct pt_regs * regs);
-extern void handle_irq(int irq, int ack, struct pt_regs * regs);
+extern void handle_irq(int irq, struct pt_regs * regs);
#define RTC_IRQ 8
#ifdef CONFIG_RTC
@@ -71,10 +68,11 @@
#endif
-extern char _stext;
static inline void alpha_do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
+ extern char _stext;
+
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
@@ -87,5 +85,10 @@
atomic_inc((atomic_t *)&prof_buffer[pc]);
}
}
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern void init_ISA_irqs(void);
+extern void init_RTC_irq(void);
#endif
diff -urN 2.3.35pre6/include/asm-alpha/smp.h a/include/asm-alpha/smp.h
--- 2.3.35pre6/include/asm-alpha/smp.h Wed Dec 29 22:55:04 1999
+++ a/include/asm-alpha/smp.h Fri Dec 31 02:41:30 1999
@@ -20,6 +20,7 @@
#ifdef __SMP__
#include <linux/threads.h>
+#include <asm/irq.h>
struct cpuinfo_alpha {
unsigned long loops_per_sec;
@@ -28,6 +29,8 @@
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
unsigned long ipi_count;
+ unsigned long irq_attempt[NR_IRQS];
+ unsigned long smp_local_irq_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
int irq_count, bh_count;
diff -urN 2.3.35pre6/include/linux/irq.h a/include/linux/irq.h
--- 2.3.35pre6/include/linux/irq.h Fri Dec 31 02:43:15 1999
+++ a/include/linux/irq.h Fri Dec 31 15:25:31 1999
@@ -11,6 +11,7 @@
#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
+#define IRQ_LEVEL 64 /* IRQ level triggered */
/*
* Interrupt controller descriptor. This is all we need
The above patch is also here:
ftp://ftp.*.kernel.org/pub/linux/kernel/people/andrea/patches/v2.3/2.3.35pre6/irq-SMP-alpha-1.gz
but I suggest you use this other patch below, which includes all of the
above and also some showstopper (alpha and non-alpha) fixes
necessary to successfully compile and run 2.3.35pre6:
ftp://ftp.*.kernel.org/pub/linux/kernel/people/andrea/patches/v2.3/2.3.35pre6/irq-SMP-alpha-all-1.gz
(basically it's my own alpha tree)
Benchmarks, comments, and updates for all the platforms I can't test
myself here are welcome! :).
Thanks.
Andrea
PS. Have a lot of fun this loong night!