Unless the user gives us an explicit affinity mask via
/proc/irq/N/smp_affinity, statically spread the interrupts across
the active processors in round-robin order at request_irq time.
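
The heart of this is the new select_smp_affinity(): unless the user
has written a mask for the IRQ, it is handed to the next present CPU
in round-robin order.  A minimal userspace sketch of that selection
loop follows -- the names next_present_cpu/MAX_CPUS and the two-CPU
present mask are illustrative only, and it assumes a 64-bit unsigned
long as on Alpha:

#include <stdio.h>

#define MAX_CPUS 64			/* one mask bit per CPU in an unsigned long */

static unsigned long present_mask = 0x5;	/* example: CPUs 0 and 2 present */
static int last_cpu;

static int
next_present_cpu(void)
{
	int cpu = last_cpu + 1;

	/* Walk forward, wrapping before MAX_CPUS, until we land on a
	   present CPU.  */
	while (((present_mask >> cpu) & 1) == 0)
		cpu = (cpu < MAX_CPUS - 1 ? cpu + 1 : 0);
	last_cpu = cpu;
	return cpu;
}

int
main(void)
{
	int irq, cpu;

	/* Spread eight hypothetical IRQs; with the mask above this
	   alternates between CPU 2 and CPU 0.  */
	for (irq = 0; irq < 8; irq++) {
		cpu = next_present_cpu();
		printf("irq %d -> cpu %d (mask %#lx)\n", irq, cpu, 1UL << cpu);
	}
	return 0;
}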


r~

diff -rup linux/arch/alpha/kernel/irq.c 2.3.99-2-2/arch/alpha/kernel/irq.c
--- linux/arch/alpha/kernel/irq.c       Thu Mar 16 22:26:06 2000
+++ 2.3.99-2-2/arch/alpha/kernel/irq.c  Thu Mar 16 23:25:00 2000
@@ -219,15 +219,33 @@ setup_irq(unsigned int irq, struct irqac
        }
        spin_unlock_irqrestore(&desc->lock,flags);
 
-       register_irq_proc(irq);
        return 0;
 }
 
 static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir [NR_IRQS];
-static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+static struct proc_dir_entry * irq_dir[NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+#ifdef CONFIG_SMP
+static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
+static char irq_user_affinity[NR_IRQS];
+static unsigned long irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+
+static void
+select_smp_affinity(int irq)
+{
+       static int last_cpu;
+       int cpu = last_cpu + 1;
+
+       if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+               return;
+
+       while (((cpu_present_mask >> cpu) & 1) == 0)
+               cpu = (cpu < NR_CPUS ? cpu + 1 : 0);
+       last_cpu = cpu;
+
+       irq_affinity[irq] = 1UL << cpu;
+       irq_desc[irq].handler->set_affinity(irq, 1UL << cpu);
+}
 
 #define HEX_DIGITS 16
 
@@ -290,18 +308,22 @@ irq_affinity_write_proc(struct file *fil
 
        err = parse_hex_value(buffer, count, &new_value);
 
-#if CONFIG_SMP
-       /*
-        * Do not allow disabling IRQs completely - it's a too easy
-        * way to make the system unusable accidentally :-) At least
-        * one online CPU still has to be targeted.
-        */
-       if (!(new_value & cpu_present_mask))
+       /* The special value 0 means release control of the
+          affinity to kernel.  */
+       if (new_value == 0) {
+               irq_user_affinity[irq] = 0;
+               select_smp_affinity(irq);
+       }
+       /* Do not allow disabling IRQs completely - it's a too easy
+          way to make the system unusable accidentally :-) At least
+          one online CPU still has to be targeted.  */
+       else if (!(new_value & cpu_present_mask))
                return -EINVAL;
-#endif
-
-       irq_affinity[irq] = new_value;
-       irq_desc[irq].handler->set_affinity(irq, new_value);
+       else {
+               irq_affinity[irq] = new_value;
+               irq_user_affinity[irq] = 1;
+               irq_desc[irq].handler->set_affinity(irq, new_value);
+       }
 
        return full_count;
 }
@@ -313,7 +335,7 @@ prof_cpu_mask_read_proc(char *page, char
        unsigned long *mask = (unsigned long *) data;
        if (count < HEX_DIGITS+1)
                return -EINVAL;
-       return sprintf (page, "%08lx\n", *mask);
+       return sprintf (page, "%016lx\n", *mask);
 }
 
 static int
@@ -330,6 +352,7 @@ prof_cpu_mask_write_proc(struct file *fi
        *mask = new_value;
        return full_count;
 }
+#endif /* CONFIG_SMP */
 
 #define MAX_NAMELEN 10
 
@@ -348,6 +371,7 @@ register_irq_proc (unsigned int irq)
        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);
 
+#ifdef CONFIG_SMP
        /* create /proc/irq/1234/smp_affinity */
        entry = create_proc_entry("smp_affinity", 0700, irq_dir[irq]);
 
@@ -357,6 +381,7 @@ register_irq_proc (unsigned int irq)
        entry->write_proc = irq_affinity_write_proc;
 
        smp_affinity_entry[irq] = entry;
+#endif
 }
 
 unsigned long prof_cpu_mask = ~0UL;
@@ -370,6 +395,7 @@ init_irq_proc (void)
        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", 0);
 
+#ifdef CONFIG_SMP
        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0700, root_irq_dir);
 
@@ -377,6 +403,7 @@ init_irq_proc (void)
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;
+#endif
 
        /*
         * Create entries for all existing IRQs.
@@ -426,6 +453,10 @@ request_irq(unsigned int irq, void (*han
        action->next = NULL;
        action->dev_id = dev_id;
 
+#ifdef CONFIG_SMP
+       select_smp_affinity(irq);
+#endif
+
        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
@@ -522,10 +553,10 @@ get_irq_list(char *buf)
                *p++ = '\n';
        }
 #if CONFIG_SMP
-       p += sprintf(p, "LOC: ");
+       p += sprintf(p, "IPI: ");
        for (j = 0; j < smp_num_cpus; j++)
                p += sprintf(p, "%10lu ",
-                            cpu_data[cpu_logical_map(j)].smp_local_irq_count);
+                            cpu_data[cpu_logical_map(j)].ipi_count);
        p += sprintf(p, "\n");
 #endif
        p += sprintf(p, "ERR: %10lu\n", irq_err_count);
diff -rup linux/arch/alpha/kernel/irq_alpha.c 2.3.99-2-2/arch/alpha/kernel/irq_alpha.c
--- linux/arch/alpha/kernel/irq_alpha.c Thu Mar 16 22:26:06 2000
+++ 2.3.99-2-2/arch/alpha/kernel/irq_alpha.c    Thu Mar 16 23:09:17 2000
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/irq.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/machvec.h>
 #include <asm/dma.h>
@@ -69,14 +70,22 @@ do_entInt(unsigned long type, unsigned l
                break;
        case 1:
 #ifdef CONFIG_SMP
-               cpu_data[smp_processor_id()].smp_local_irq_count++;
+         {
+               long cpu;
                smp_percpu_timer_interrupt(&regs);
-               if (smp_processor_id() == boot_cpuid)
-#endif
+               cpu = smp_processor_id();
+               if (cpu != boot_cpuid) {
+                       irq_attempt(cpu, RTC_IRQ)++;
+                       kstat.irqs[cpu][RTC_IRQ]++;
+               } else {
                        handle_irq(RTC_IRQ, &regs);
+               }
+         }
+#else
+               handle_irq(RTC_IRQ, &regs);
+#endif
                return;
        case 2:
-               irq_err_count++;
                alpha_mv.machine_check(vector, la_ptr, &regs);
                return;
        case 3:
diff -rup linux/arch/alpha/kernel/setup.c 2.3.99-2-2/arch/alpha/kernel/setup.c
--- linux/arch/alpha/kernel/setup.c     Thu Mar 16 19:30:37 2000
+++ 2.3.99-2-2/arch/alpha/kernel/setup.c        Thu Mar 16 23:00:22 2000
@@ -846,6 +846,22 @@ platform_string(void)
        }
 }
 
+static int
+get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
+{
+       struct percpu_struct *cpu;
+       int i, count = 0;
+
+       for (i = 0; i < num; i++) {
+               cpu = (struct percpu_struct *)
+                       ((char *)cpubase + i*hwrpb->processor_size);
+               if ((cpu->flags & 0x1cc) == 0x1cc)
+                       count++;
+       }
+       return count;
+}
+
+
 /*
  * BUFFER is PAGE_SIZE bytes long.
  */
@@ -865,7 +881,7 @@ int get_cpuinfo(char *buffer)
        char *cpu_name;
        char *systype_name;
        char *sysvariation_name;
-       int len;
+       int len, nr_processors;
 
        cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
        cpu_index = (unsigned) (cpu->type - 1);
@@ -876,6 +892,8 @@ int get_cpuinfo(char *buffer)
        get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
                     &systype_name, &sysvariation_name);
 
+       nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
+
        len = sprintf(buffer,
                      "cpu\t\t\t: Alpha\n"
                      "cpu model\t\t: %s\n"
@@ -894,7 +912,8 @@ int get_cpuinfo(char *buffer)
                      "BogoMIPS\t\t: %lu.%02lu\n"
                      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
                      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
-                     "platform string\t\t: %s\n",
+                     "platform string\t\t: %s\n"
+                     "cpus detected\t\t: %d\n",
                       cpu_name, cpu->variation, cpu->revision,
                       (char*)cpu->serial_no,
                       systype_name, sysvariation_name, hwrpb->sys_revision,
@@ -909,7 +928,7 @@ int get_cpuinfo(char *buffer)
                       loops_per_sec / 500000, (loops_per_sec / 5000) % 100,
                       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
                       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
-                      platform_string());
+                      platform_string(), nr_processors);
 
 #ifdef __SMP__
        len += smp_info(buffer+len);
diff -rup linux/arch/alpha/kernel/smp.c 2.3.99-2-2/arch/alpha/kernel/smp.c
--- linux/arch/alpha/kernel/smp.c       Thu Mar 16 19:30:37 2000
+++ 2.3.99-2-2/arch/alpha/kernel/smp.c  Thu Mar 16 23:21:15 2000
@@ -1003,15 +1003,11 @@ flush_icache_page(struct vm_area_struct 
 int
 smp_info(char *buffer)
 {
-       long i;
-       unsigned long sum = 0;
-       for (i = 0; i < NR_CPUS; i++)
-               sum += cpu_data[i].ipi_count;
-
-       return sprintf(buffer, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
-                      smp_num_probed, smp_num_cpus, cpu_present_mask, sum);
+       return sprintf(buffer,
+                      "cpus active\t\t: %d\n"
+                      "cpu active mask\t\t: %016lx\n",
+                      smp_num_cpus, cpu_present_mask);
 }
-
 
 #if DEBUG_SPINLOCK
 void
diff -rup linux/include/asm-alpha/delay.h 2.3.99-2-2/include/asm-alpha/delay.h
--- linux/include/asm-alpha/delay.h     Thu Mar 16 22:26:31 2000
+++ 2.3.99-2-2/include/asm-alpha/delay.h        Thu Mar 16 23:32:20 2000
@@ -40,7 +40,7 @@ __udelay(unsigned long usecs, unsigned l
 #ifdef __SMP__
 #define udelay(u)  __udelay((u), cpu_data[smp_processor_id()].loops_per_sec)
 #else
-#define udelay(u)  __udelay((u), loops_per_sec);
+#define udelay(u)  __udelay((u), loops_per_sec)
 #endif
 
 #endif /* defined(__ALPHA_DELAY_H) */
diff -rup linux/include/asm-alpha/smp.h 2.3.99-2-2/include/asm-alpha/smp.h
--- linux/include/asm-alpha/smp.h       Thu Mar 16 19:30:38 2000
+++ 2.3.99-2-2/include/asm-alpha/smp.h  Thu Mar 16 23:31:55 2000
@@ -30,7 +30,6 @@ struct cpuinfo_alpha {
        unsigned long pgtable_cache_sz;
        unsigned long ipi_count;
        unsigned long irq_attempt[NR_IRQS];
-       unsigned long smp_local_irq_count;
        unsigned long prof_multiplier;
        unsigned long prof_counter;
        int irq_count, bh_count;
