Updated ppc64 I-pipe patch for 2.6.14. Changes:
* synced with the ppc 1.0-07 patch
* fixed sending an IPI to self
* added an extra IPI (#4) for the Xenomai SMP timer implementation
Also available at the usual place:
http://www.cs.helsinki.fi/group/nonsto/rtaippc64.html
Philippe, please put this in Xenomai 2.1.
-- Heikki Lindholm
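
For reference, a client domain could hook the new service IPI roughly as
follows. This is only a sketch against the interfaces added below; the
names my_domain and my_attach_ipi are made up, and error handling is
omitted:

	#include <linux/ipipe.h>

	/* Runs on each CPU that receives IPIPE_SERVICE_IPI4. */
	static void my_ipi_handler(unsigned irq)
	{
		/* per-CPU timer work would go here */
	}

	static int my_attach_ipi(struct ipipe_domain *my_domain)
	{
		/* Attach a handler to the new virtual IPI... */
		return ipipe_virtualize_irq(my_domain, IPIPE_SERVICE_IPI4,
					    &my_ipi_handler, NULL,
					    IPIPE_HANDLE_MASK);
	}

	/* ...then kick the other CPUs with:
	 *	__ipipe_send_ipi(IPIPE_SERVICE_IPI4, cpumask);
	 */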
diff -Nru linux-2.6.14/arch/ppc64/Kconfig linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/Kconfig
--- linux-2.6.14/arch/ppc64/Kconfig 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/Kconfig 2005-11-04 08:56:30.000000000 +0200
@@ -227,6 +227,8 @@
depends on SMP
default "32"
+source "kernel/ipipe/Kconfig"
+
config HMT
bool "Hardware multithreading"
depends on SMP && PPC_PSERIES && BROKEN
diff -Nru linux-2.6.14/arch/ppc64/kernel/entry.S linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/entry.S
--- linux-2.6.14/arch/ppc64/kernel/entry.S 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/entry.S 2005-11-04 10:28:59.000000000 +0200
@@ -35,6 +35,14 @@
#define DO_SOFT_DISABLE
#endif
+#ifdef CONFIG_IPIPE
+#define STALL_ROOT_COND bl __ipipe_stall_root_raw
+#define UNSTALL_ROOT_COND bl __ipipe_unstall_root_raw
+#else /* !CONFIG_IPIPE */
+#define STALL_ROOT_COND
+#define UNSTALL_ROOT_COND
+#endif /* CONFIG_IPIPE */
+
/*
* System calls.
*/
@@ -108,6 +116,23 @@
ori r11,r11,MSR_EE
mtmsrd r11,1
+#ifdef CONFIG_IPIPE
+ addi r3,r1,GPR0
+ bl .__ipipe_syscall_root
+ cmpdi r3,0
+ ld r0,GPR0(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ ld r9,GPR9(r1)
+ bgt ipipe_end_syscall
+ blt syscall_exit
+ addi r9,r1,STACK_FRAME_OVERHEAD
+#endif /* CONFIG_IPIPE */
+
#ifdef SHOW_SYSCALLS
bl .do_show_syscall
REST_GPR(0,r1)
@@ -196,6 +221,35 @@
rfid
b . /* prevent speculative execution */
+#ifdef CONFIG_IPIPE
+ .globl ipipe_end_syscall
+ipipe_end_syscall:
+ mfmsr r10
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+ ld r5,_CCR(r1)
+ ld r8,_MSR(r1)
+ ld r7,_NIP(r1)
+ stdcx. r0,0,r1 /* to clear pending reservations */
+ andi. r6,r8,MSR_PR
+ ld r4,_LINK(r1)
+ beq- 1f /* only restore r13 if */
+ ld r13,GPR13(r1) /* returning to usermode */
+1: ld r2,GPR2(r1)
+ li r12,MSR_RI
+ mfmsr r10
+ andc r10,r10,r12
+ mtmsrd r10,1 /* clear MSR.RI */
+ ld r1,GPR1(r1)
+ mtlr r4
+ mtcr r5
+ mtspr SRR0,r7
+ mtspr SRR1,r8
+ rfid
+ b . /* prevent speculative execution */
+#endif /* CONFIG_IPIPE */
+
syscall_enosys:
li r3,-ENOSYS
std r3,RESULT(r1)
@@ -470,6 +524,13 @@
rotldi r9,r9,16
mtmsrd r9,1 /* Update machine state */
+#ifdef CONFIG_IPIPE
+ bl .__ipipe_check_root
+ cmpdi r3,0
+ mfmsr r10 /* this is used later, might be messed */
+ beq- restore
+#endif /* CONFIG_IPIPE */
+
#ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
li r0,_TIF_NEED_RESCHED /* bits to check */
@@ -843,3 +904,11 @@
blr
#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_IPIPE
+
+_GLOBAL(__ipipe_ret_from_except_lite)
+ cmpdi r3,0
+ bne+ .ret_from_except_lite
+ b restore
+#endif /* CONFIG_IPIPE */
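
The three-way branch added to the syscall path above follows the return
convention of __ipipe_syscall_root() (defined in ipipe-root.c further
down). Roughly, in C terms (a paraphrase of the assembly, not code from
the patch):

	int r = __ipipe_syscall_root(regs);

	if (r > 0)
		goto ipipe_end_syscall;	/* absorbed; fast exit, no tail work */
	if (r < 0)
		goto syscall_exit;	/* absorbed; still run signal/resched work */
	/* r == 0: fall through and hand the syscall to Linux as usual */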
diff -Nru linux-2.6.14/arch/ppc64/kernel/head.S linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/head.S
--- linux-2.6.14/arch/ppc64/kernel/head.S 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/head.S 2005-11-04 10:36:08.000000000 +0200
@@ -376,6 +376,18 @@
bl hdlr; \
b .ret_from_except_lite
+#ifdef CONFIG_IPIPE
+#define IPIPE_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ DISABLE_INTS; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .__ipipe_ret_from_except_lite
+#endif /* CONFIG_IPIPE */
+
/*
* Start of pSeries system interrupt routines
*/
@@ -685,7 +697,11 @@
bl .machine_check_exception
b .ret_from_except
+#ifdef CONFIG_IPIPE
+ IPIPE_EXCEPTION_COMMON_LITE(0x900, decrementer, .__ipipe_grab_timer)
+#else /* !CONFIG_IPIPE */
STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+#endif /* CONFIG_IPIPE */
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
@@ -815,8 +831,13 @@
hardware_interrupt_entry:
DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_IPIPE
+ bl .__ipipe_grab_irq
+ b .__ipipe_ret_from_except_lite
+#else /* !CONFIG_IPIPE */
bl .do_IRQ
b .ret_from_except_lite
+#endif /* CONFIG_IPIPE */
.align 7
.globl alignment_common
diff -Nru linux-2.6.14/arch/ppc64/kernel/idle.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/idle.c
--- linux-2.6.14/arch/ppc64/kernel/idle.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/idle.c 2005-11-04 10:49:01.000000000 +0200
@@ -45,6 +45,7 @@
while (!need_resched() && !cpu_is_offline(cpu)) {
ppc64_runlatch_off();
+ ipipe_suspend_domain();
/*
* Go into low thread priority and possibly
* low power mode.
@@ -74,7 +75,10 @@
ppc64_runlatch_off();
if (!need_resched())
+ {
+ ipipe_suspend_domain();
power4_idle();
+ }
if (need_resched()) {
ppc64_runlatch_on();
diff -Nru linux-2.6.14/arch/ppc64/kernel/ipipe-core.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/ipipe-core.c
--- linux-2.6.14/arch/ppc64/kernel/ipipe-core.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/ipipe-core.c 2005-12-06 17:37:06.000000000 +0200
@@ -0,0 +1,634 @@
+/* -*- linux-c -*-
+ * linux/arch/ppc64/kernel/ipipe-core.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
+ * Copyright (C) 2005 Heikki Lindholm (64-bit PowerPC port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-PIPE core support for PowerPC.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/* Current reload value for the decrementer. */
+unsigned long __ipipe_decr_ticks;
+
+/* Next tick date (timebase value). */
+unsigned long long __ipipe_decr_next[IPIPE_NR_CPUS];
+
+struct pt_regs __ipipe_tick_regs[IPIPE_NR_CPUS];
+
+static inline unsigned long ffnz(unsigned long ul)
+{
+ __asm__ __volatile__("cntlzd %0, %1":"=r"(ul):"r"(ul & (-ul)));
+ return 63 - ul;
+}
+
+#ifdef CONFIG_SMP
+
+static cpumask_t __ipipe_cpu_sync_map;
+
+static cpumask_t __ipipe_cpu_lock_map;
+
+static __cacheline_aligned_in_smp ipipe_spinlock_t __ipipe_cpu_barrier = IPIPE_SPIN_LOCK_UNLOCKED;
+
+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
+
+static void (*__ipipe_cpu_sync) (void);
+
+struct ipipe_ipi_struct ipipe_ipi_message[IPIPE_NR_CPUS] __cacheline_aligned;
+
+/* XXX here because kernel/ipipe/core.c sets this for CRITICAL_IPI */
+int __ipipe_ack_system_irq(unsigned irq)
+{
+ return 1;
+}
+
+/* Always called with hw interrupts off. */
+
+void __ipipe_do_critical_sync(unsigned irq)
+{
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid();
+
+ cpu_set(cpuid, __ipipe_cpu_sync_map);
+
+ /*
+ * Now we are in sync with the lock requestor running on another
+ * CPU. Enter a spinning wait until it releases the global
+ * lock.
+ */
+ spin_lock_hw(&__ipipe_cpu_barrier);
+
+ /* Got it. Now get out. */
+
+ if (__ipipe_cpu_sync)
+ /* Call the sync routine if any. */
+ __ipipe_cpu_sync();
+
+ DBG("__ipipe_do_critical_sync(%s[%d]): fn:%p irqs:%d\n",
ipipe_current_domain->name, cpuid, __ipipe_cpu_sync,
+ irqs_disabled_hw());
+ spin_unlock_hw(&__ipipe_cpu_barrier);
+
+ cpu_clear(cpuid, __ipipe_cpu_sync_map);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ipipe_critical_enter() -- Grab the superlock excluding all CPUs
+ * but the current one from a critical section. This lock is used when
+ * we must enforce a global critical section for a single CPU in a
+ * possibly SMP system whichever context the CPUs are running.
+ */
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) { /* We might be running a SMP-kernel on
a UP box... */
+ ipipe_declare_cpuid;
+ cpumask_t lock_map;
+ cpumask_t others;
+
+ ipipe_load_cpuid();
+
+ DBG("ipipe_critical_sync(%s[%d]) fn:%p cnt:%u irqs:%d\n",
+ ipipe_current_domain->name, cpuid,
+ syncfn, __ipipe_critical_count,
+ irqs_disabled_hw());
+
+ if (!cpu_test_and_set(cpuid, __ipipe_cpu_lock_map)) {
+ while (cpu_test_and_set(BITS_PER_LONG - 1,
+ __ipipe_cpu_lock_map)) {
+ int n = 0;
+ do {
+ cpu_relax();
+ } while (++n < cpuid);
+ }
+
+ spin_lock_hw(&__ipipe_cpu_barrier);
+
+ __ipipe_cpu_sync = syncfn;
+
+ /* Send the sync IPI to all processors but the current one. */
+ cpus_setall(others);
+ cpu_clear(ipipe_processor_id(), others);
+ __ipipe_send_ipi(IPIPE_CRITICAL_IPI, others);
+
+ cpus_andnot(lock_map, cpu_online_map,
+ __ipipe_cpu_lock_map);
+
+ while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
+ cpu_relax();
+ }
+
+ atomic_inc(&__ipipe_critical_count);
+ }
+#endif /* CONFIG_SMP */
+
+ return flags;
+}
+
+/* ipipe_critical_exit() -- Release the superlock. */
+
+void ipipe_critical_exit(unsigned long flags)
+{
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) { /* We might be running a SMP-kernel on
a UP box... */
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid();
+
+ DBG("ipipe_critical_exit(%s[%d]) cnt:%u\n",
+ ipipe_current_domain->name, cpuid,
+ __ipipe_critical_count);
+
+ if (atomic_dec_and_test(&__ipipe_critical_count)) {
+ spin_unlock_hw(&__ipipe_cpu_barrier);
+
+ while (!cpus_empty(__ipipe_cpu_sync_map))
+ cpu_relax();
+
+ cpu_clear(cpuid, __ipipe_cpu_lock_map);
+ cpu_clear(BITS_PER_LONG - 1, __ipipe_cpu_lock_map);
+ }
+ }
+#endif /* CONFIG_SMP */
+
+ local_irq_restore_hw(flags);
+}
+
+void __ipipe_init_platform(void)
+{
+ unsigned virq;
+
+ /*
+ * Allocate a virtual IRQ for the decrementer trap early to
+ * get it mapped to IPIPE_VIRQ_BASE
+ */
+
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_TIMER_VIRQ)
+ panic("I-pipe: cannot reserve timer virq #%d (got #%d)",
+ IPIPE_TIMER_VIRQ, virq);
+ virq = ipipe_alloc_virq();
+#ifdef CONFIG_SMP
+ if (virq != IPIPE_CRITICAL_IPI)
+ panic("I-pipe: cannot reserve critical IPI virq #%d (got #%d)",
+ IPIPE_CRITICAL_IPI, virq);
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_SERVICE_IPI0)
+ panic("I-pipe: cannot reserve service IPI 0 virq #%d (got #%d)",
+ IPIPE_SERVICE_IPI0, virq);
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_SERVICE_IPI1)
+ panic("I-pipe: cannot reserve service IPI 1 virq #%d (got #%d)",
+ IPIPE_SERVICE_IPI1, virq);
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_SERVICE_IPI2)
+ panic("I-pipe: cannot reserve service IPI 2 virq #%d (got #%d)",
+ IPIPE_SERVICE_IPI2, virq);
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_SERVICE_IPI3)
+ panic("I-pipe: cannot reserve service IPI 3 virq #%d (got #%d)",
+ IPIPE_SERVICE_IPI3, virq);
+ virq = ipipe_alloc_virq();
+ if (virq != IPIPE_SERVICE_IPI4)
+ panic("I-pipe: cannot reserve service IPI 4 virq #%d (got #%d)",
+ IPIPE_SERVICE_IPI4, virq);
+#endif
+ __ipipe_decr_ticks = tb_ticks_per_jiffy;
+}
+
+/*
+ * __ipipe_sync_stage() -- Flush the pending IRQs for the current
+ * domain (and processor). This routine flushes the interrupt log
+ * (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+ * more on the deferred interrupt scheme). Every interrupt that
+ * occurred while the pipeline was stalled gets played. WARNING:
+ * callers on SMP boxen should always check for CPU migration on
+ * return of this routine. One can control the kind of interrupts
+ * which are going to be sync'ed using the syncmask
+ * parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+ * plays virtual interrupts only. This routine must be called with hw
+ * interrupts off.
+ */
+void __ipipe_sync_stage(unsigned long syncmask)
+{
+ unsigned long mask, submask;
+ struct ipcpudata *cpudata;
+ struct ipipe_domain *ipd;
+ ipipe_declare_cpuid;
+ int level, rank;
+ unsigned irq;
+
+ ipipe_load_cpuid();
+ ipd = ipipe_percpu_domain[cpuid];
+ cpudata = &ipd->cpudata[cpuid];
+
+ if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
+ return;
+
+ /*
+ * The policy here is to keep the dispatching code interrupt-free
+ * by stalling the current stage. If the upper domain handler
+ * (which we call) wants to re-enable interrupts while in a safe
+ * portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+ * sigaction()), it will have to unstall (then stall again before
+ * returning to us!) the stage when it sees fit.
+ */
+ while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
+ level = ffnz(mask);
+ __clear_bit(level, &cpudata->irq_pending_hi);
+
+ while ((submask = cpudata->irq_pending_lo[level]) != 0) {
+ rank = ffnz(submask);
+ irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+ if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
+ __clear_bit(rank,
+ &cpudata->irq_pending_lo[level]);
+ continue;
+ }
+
+ if (--cpudata->irq_hits[irq] == 0) {
+ __clear_bit(rank,
+ &cpudata->irq_pending_lo[level]);
+ ipipe_mark_irq_delivery(ipd,irq,cpuid);
+ }
+
+ __set_bit(IPIPE_STALL_FLAG, &cpudata->status);
+ ipipe_mark_domain_stall(ipd, cpuid);
+
+ if (ipd == ipipe_root_domain) {
+ /*
+ * Linux handlers are called w/ hw
+ * interrupts on so that they cannot
+ * defer interrupts meant for higher
+ * priority domains.
+ */
+ local_irq_enable_hw();
+ ((void (*)(unsigned, struct pt_regs *))
+ ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);
+ local_irq_disable_hw();
+ } else {
+ __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+ ipd->irqs[irq].handler(irq);
+ __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+ }
+#ifdef CONFIG_SMP
+ {
+ int _cpuid = ipipe_processor_id();
+
+ if (_cpuid != cpuid) { /* Handle CPU migration. */
+ /*
+ * We expect any domain to clear the SYNC bit each
+ * time it switches in a new task, so that preemptions
+ * and/or CPU migrations (in the SMP case) over the
+ * ISR do not lock out the log syncer for some
+ * indefinite amount of time. In the Linux case,
+ * schedule() handles this (see kernel/sched.c). For
+ * this reason, we don't bother clearing it here for
+ * the source CPU in the migration handling case,
+ * since it must have scheduled another task in by
+ * now.
+ */
+ cpuid = _cpuid;
+ cpudata = &ipd->cpudata[cpuid];
+ __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+ }
+ }
+#endif /* CONFIG_SMP */
+
+ __clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
+ ipipe_mark_domain_unstall(ipd, cpuid);
+ }
+ }
+
+ __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
+}
+
+#ifdef CONFIG_SMP
+cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask)
+{
+ cpumask_t oldmask = irq_affinity[irq];
+ irq_desc_t *desc = get_irq_desc(irq);
+
+ if (desc->handler == NULL || desc->handler->set_affinity == NULL)
+ return CPU_MASK_NONE;
+
+ if (cpus_empty(cpumask))
+ return oldmask; /* Return mask value -- no change. */
+
+ cpus_and(cpumask, cpumask, cpu_online_map);
+
+ if (cpus_empty(cpumask))
+ return CPU_MASK_NONE; /* Error -- bad mask value or non-routable IRQ. */
+
+ irq_affinity[irq] = cpumask;
+ irq_desc->handler->set_affinity(irq, cpumask);
+
+ return oldmask;
+}
+
+int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
+{
+ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
+ unsigned long flags;
+ ipipe_declare_cpuid;
+ int i;
+
+ ipipe_lock_cpu(flags);
+
+ ipi -= IPIPE_MSG_IPI_OFFSET;
+ for_each_online_cpu(i)
+ {
+ if (cpu_isset(i, cpumask))
+ set_bit(ipi, &ipipe_ipi_message[i].value);
+ }
+ mb();
+
+ if (!cpus_empty(cpumask))
+#ifdef CONFIG_MPIC
+ mpic_send_ipi(0x2, cpus_addr(cpumask)[0]);
+#else
+#error "We have only MPIC support here!"
+#endif
+ ipipe_unlock_cpu(flags);
+
+ return 0;
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ipipe_virtualize_irq() -- Attach a handler (and optionally a hw
+ * acknowledge routine) to an interrupt for the given domain.
+ */
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+ unsigned irq,
+ void (*handler) (unsigned irq),
+ int (*acknowledge) (unsigned irq), unsigned modemask)
+{
+ unsigned long flags;
+ int err;
+
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK)
+ return -EPERM;
+
+ spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
+
+ if (handler != NULL) {
+ /*
+ * A bit of hack here: if we are re-virtualizing an IRQ just
+ * to change the acknowledge routine by passing the special
+ * IPIPE_SAME_HANDLER value, then simply recycle the current
+ * handler for the IRQ. This allows Linux device drivers
+ * managing shared IRQ lines to call ipipe_virtualize_irq() in
+ * addition to request_irq() just for the purpose of
+ * interposing their own shared acknowledge routine.
+ */
+
+ if (handler == IPIPE_SAME_HANDLER) {
+ handler = ipd->irqs[irq].handler;
+
+ if (handler == NULL) {
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+ } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
+ ipd->irqs[irq].handler != NULL) {
+ err = -EBUSY;
+ goto unlock_and_exit;
+ }
+
+ if ((modemask & (IPIPE_SHARED_MASK | IPIPE_PASS_MASK)) ==
+ IPIPE_SHARED_MASK) {
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+
+ if ((modemask & IPIPE_STICKY_MASK) != 0)
+ modemask |= IPIPE_HANDLE_MASK;
+ } else
+ modemask &=
+ ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK |
+ IPIPE_SHARED_MASK);
+
+ if (acknowledge == NULL) {
+ if ((modemask & IPIPE_SHARED_MASK) == 0)
+ /*
+ * Acknowledge handler unspecified -- this is ok in
+ * non-shared management mode, but we will force the
+ * use of the Linux-defined handler instead.
+ */
+ acknowledge = ipipe_root_domain->irqs[irq].acknowledge;
+ else {
+ /*
+ * A valid acknowledge handler to be called in shared
+ * mode is required when declaring a shared IRQ.
+ */
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+ }
+
+ ipd->irqs[irq].handler = handler;
+ ipd->irqs[irq].acknowledge = acknowledge;
+ ipd->irqs[irq].control = modemask;
+
+ if (irq < NR_IRQS &&
+ handler != NULL &&
+ !ipipe_virtual_irq_p(irq) && (modemask & IPIPE_ENABLE_MASK) != 0) {
+ if (ipd != ipipe_current_domain) {
+ /*
+ * IRQ enable/disable state is domain-sensitive, so
+ * we may not change it for another domain. What is
+ * allowed however is forcing some domain to handle
+ * an interrupt source, by passing the proper 'ipd'
+ * descriptor which thus may be different from
+ * ipipe_current_domain.
+ */
+ err = -EPERM;
+ goto unlock_and_exit;
+ }
+
+ enable_irq(irq);
+ }
+
+ err = 0;
+
+unlock_and_exit:
+
+ spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
+
+ return err;
+}
+
+/* ipipe_control_irq() -- Change modes of a pipelined interrupt for
+ * the current domain. */
+
+int ipipe_control_irq(unsigned irq, unsigned clrmask, unsigned setmask)
+{
+ irq_desc_t *desc;
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ if (ipipe_current_domain->irqs[irq].control & IPIPE_SYSTEM_MASK)
+ return -EPERM;
+
+ if (((setmask | clrmask) & IPIPE_SHARED_MASK) != 0)
+ return -EINVAL;
+
+ desc = irq_desc + irq;
+
+ if (ipipe_current_domain->irqs[irq].handler == NULL)
+ setmask &= ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
+
+ if ((setmask & IPIPE_STICKY_MASK) != 0)
+ setmask |= IPIPE_HANDLE_MASK;
+
+ if ((clrmask & (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK)) != 0) /* If
one goes, both go. */
+ clrmask |= (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
+
+ spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
+
+ ipipe_current_domain->irqs[irq].control &= ~clrmask;
+ ipipe_current_domain->irqs[irq].control |= setmask;
+
+ if ((setmask & IPIPE_ENABLE_MASK) != 0)
+ enable_irq(irq);
+ else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
+ disable_irq(irq);
+
+ spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
+
+ return 0;
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+ info->ncpus = num_online_cpus();
+ info->cpufreq = ipipe_cpu_freq();
+ info->archdep.tmirq = IPIPE_TIMER_VIRQ;
+ info->archdep.tmfreq = info->cpufreq;
+
+ return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at the front of the pipeline,
+ * just as if it had actually been received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_IRQS ||
+ (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+ return -EINVAL;
+
+ local_irq_save_hw(flags);
+
+ __ipipe_handle_irq(irq, NULL);
+
+ local_irq_restore_hw(flags);
+
+ return 1;
+}
+
+static void __ipipe_set_decr(void)
+{
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid();
+
+ disarm_decr[cpuid] = (__ipipe_decr_ticks != tb_ticks_per_jiffy);
+#ifdef CONFIG_40x
+ /* Enable and set auto-reload. */
+ mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
+ mtspr(SPRN_PIT, __ipipe_decr_ticks);
+#else /* !CONFIG_40x */
+ __ipipe_decr_next[cpuid] = __ipipe_read_timebase() + __ipipe_decr_ticks;
+ set_dec(__ipipe_decr_ticks);
+#endif /* CONFIG_40x */
+}
+
+int ipipe_tune_timer(unsigned long ns, int flags)
+{
+ unsigned long x, ticks;
+
+ if (flags & IPIPE_RESET_TIMER)
+ ticks = tb_ticks_per_jiffy;
+ else {
+ ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
+
+ if (ticks > tb_ticks_per_jiffy)
+ return -EINVAL;
+ }
+
+ x = ipipe_critical_enter(&__ipipe_set_decr); /* Sync with all CPUs */
+ __ipipe_decr_ticks = ticks;
+ __ipipe_set_decr();
+ ipipe_critical_exit(x);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(__ipipe_sync_stage);
+EXPORT_SYMBOL(__ipipe_decr_ticks);
+EXPORT_SYMBOL(__ipipe_decr_next);
+EXPORT_SYMBOL(ipipe_critical_enter);
+EXPORT_SYMBOL(ipipe_critical_exit);
+EXPORT_SYMBOL(ipipe_trigger_irq);
+EXPORT_SYMBOL(ipipe_virtualize_irq);
+EXPORT_SYMBOL(ipipe_control_irq);
+EXPORT_SYMBOL(ipipe_get_sysinfo);
+EXPORT_SYMBOL(ipipe_tune_timer);
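
A usage note on ipipe_tune_timer(): the period is given in nanoseconds
and may not exceed one jiffy; IPIPE_RESET_TIMER restores the stock tick.
A minimal sketch (the 100 us period is an arbitrary example value):

	/* Rearm the decrementer for a 100 us periodic tick... */
	if (ipipe_tune_timer(100000, 0))
		printk(KERN_ERR "tick period exceeds one jiffy\n");

	/* ...and later hand the stock tb_ticks_per_jiffy period back. */
	ipipe_tune_timer(0, IPIPE_RESET_TIMER);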
diff -Nru linux-2.6.14/arch/ppc64/kernel/ipipe-root.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/ipipe-root.c
--- linux-2.6.14/arch/ppc64/kernel/ipipe-root.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/ipipe-root.c 2005-12-04 12:40:59.000000000 +0200
@@ -0,0 +1,613 @@
+/* -*- linux-c -*-
+ * linux/arch/ppc64/kernel/ipipe-root.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum (Adeos/ppc port over 2.6).
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
+ * Copyright (C) 2005 Heikki Lindholm (64-bit PowerPC port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for PowerPC.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/mmu_context.h>
+#include <asm/machdep.h>
+#include <asm/processor.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static void __ipipe_null_handler(unsigned irq)
+{
+ /* Nop. */
+ DBG("__ipipe_null_handler called.\n");
+}
+
+extern irq_desc_t irq_desc[];
+
+#ifdef CONFIG_SMP
+unsigned int __ipipe_ipi_irq = NR_IRQS+1; /* dummy value */
+
+extern struct ipipe_ipi_struct ipipe_ipi_message[];
+
+void __ipipe_register_ipi(unsigned int irq);
+irqreturn_t __ipipe_ipi_action(int irq, void *dev_id, struct pt_regs *regs);
+#endif
+
+static struct hw_interrupt_type __ipipe_std_irq_dtype[NR_IRQS];
+
+static void __ipipe_override_irq_enable(unsigned irq)
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+ ipipe_irq_unlock(irq);
+ __ipipe_std_irq_dtype[irq].enable(irq);
+ local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_disable(unsigned irq)
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+ ipipe_irq_lock(irq);
+ __ipipe_std_irq_dtype[irq].disable(irq);
+ local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_end(unsigned irq)
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+
+ if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ ipipe_irq_unlock(irq);
+
+ __ipipe_std_irq_dtype[irq].end(irq);
+
+ local_irq_restore_hw(flags);
+}
+
+static void __ipipe_override_irq_affinity(unsigned irq, cpumask_t mask)
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+ __ipipe_std_irq_dtype[irq].set_affinity(irq, mask);
+ local_irq_restore_hw(flags);
+}
+
+static void __ipipe_enable_sync(void)
+{
+ __ipipe_decr_next[ipipe_processor_id()] =
+ __ipipe_read_timebase() + get_dec();
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+ unsigned long flags;
+ unsigned irq;
+
+ flags = ipipe_critical_enter(&__ipipe_enable_sync);
+
+ /* First, virtualize all interrupts from the root domain. */
+
+ for (irq = 0; irq < NR_IRQS; irq++)
+ ipipe_virtualize_irq(ipipe_root_domain,
+ irq,
+ (void (*)(unsigned))&__ipipe_do_IRQ,
+ &__ipipe_ack_irq,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+
+ /*
+ * We use a virtual IRQ to handle the timer irq (decrementer trap)
+ * which has been allocated early in __ipipe_init_platform().
+ */
+
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_TIMER_VIRQ,
+ (void (*)(unsigned))&__ipipe_do_timer,
+ NULL, IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+
+ /*
+ * Virtual IPIs
+ */
+#ifdef CONFIG_SMP
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_SERVICE_IPI0,
+ (void (*)(unsigned))&__ipipe_null_handler,
+ NULL,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_SERVICE_IPI1,
+ (void (*)(unsigned))&__ipipe_null_handler,
+ NULL,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_SERVICE_IPI2,
+ (void (*)(unsigned))&__ipipe_null_handler,
+ NULL,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_SERVICE_IPI3,
+ (void (*)(unsigned))&__ipipe_null_handler,
+ NULL,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+ ipipe_virtualize_irq(ipipe_root_domain,
+ IPIPE_SERVICE_IPI4,
+ (void (*)(unsigned))&__ipipe_null_handler,
+ NULL,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+#endif /* CONFIG_SMP */
+ /*
+ * Interpose on the IRQ control routines so we can make them
+ * atomic using hw masking and prevent the interrupt log from
+ * being untimely flushed.
+ */
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ if (irq_desc[irq].handler != NULL)
+ __ipipe_std_irq_dtype[irq] = *irq_desc[irq].handler;
+ }
+
+ /*
+ * The original controller structs are often shared, so we first
+ * save them all before changing any of them. Notice that we don't
+ * override the ack() handler since we will enforce the necessary
+ * setup in __ipipe_ack_irq().
+ */
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct hw_interrupt_type *handler = irq_desc[irq].handler;
+
+ if (handler == NULL)
+ continue;
+
+ if (handler->enable != NULL)
+ handler->enable = &__ipipe_override_irq_enable;
+
+ if (handler->disable != NULL)
+ handler->disable = &__ipipe_override_irq_disable;
+
+ if (handler->end != NULL)
+ handler->end = &__ipipe_override_irq_end;
+
+ if (handler->set_affinity != NULL)
+ handler->set_affinity = &__ipipe_override_irq_affinity;
+ }
+
+ __ipipe_decr_next[ipipe_processor_id()] =
+ __ipipe_read_timebase() + get_dec();
+
+ ipipe_critical_exit(flags);
+}
+
+int __ipipe_ack_irq(unsigned irq)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ unsigned long flags;
+ ipipe_declare_cpuid;
+
+ if (desc->handler->ack == NULL)
+ return 1;
+
+ /*
+ * No need to mask IRQs at hw level: we are always called from
+ * __ipipe_handle_irq(), so interrupts are already off. We
+ * stall the pipeline so that spin_lock_irq*() ops won't
+ * unintentionally flush it, since this could cause infinite
+ * recursion.
+ */
+
+ ipipe_load_cpuid();
+ flags = ipipe_test_and_stall_pipeline();
+ preempt_disable();
+ spin_lock_hw(&desc->lock);
+ desc->handler->ack(irq);
+ spin_unlock_hw(&desc->lock);
+ preempt_enable_no_resched();
+ ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags, cpuid);
+
+ return 1;
+}
+
+/*
+ * __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
+ * be called with local hw interrupts disabled.
+ */
+static inline void __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
+{
+ struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];
+
+ while (pos != &__ipipe_pipeline) {
+ struct ipipe_domain *next_domain =
+ list_entry(pos, struct ipipe_domain, p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,
+ &next_domain->cpudata[cpuid].status))
+ break; /* Stalled stage -- do not go further. */
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
+
+ if (next_domain == this_domain)
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+ else {
+ __ipipe_switch_to(this_domain, next_domain, cpuid);
+
+ ipipe_load_cpuid(); /* Processor might have changed. */
+
+ if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+ !test_bit(IPIPE_STALL_FLAG,
+ &this_domain->cpudata[cpuid].status))
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ break;
+ } else if (next_domain == this_domain)
+ break;
+
+ pos = next_domain->p_link.next;
+ }
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are off on entry.
+ */
+void __ipipe_handle_irq(int irq, struct pt_regs *regs)
+{
+ struct ipipe_domain *this_domain;
+ struct list_head *head, *pos;
+ ipipe_declare_cpuid;
+ int m_ack, s_ack;
+
+ m_ack = (regs == NULL); /* Software-triggered IRQs do not need any ack. */
+ if (irq >= IPIPE_NR_IRQS) {
+ printk(KERN_ERR "I-pipe: spurious interrupt %d\n", irq);
+ return;
+ }
+
+ ipipe_load_cpuid();
+
+ this_domain = ipipe_percpu_domain[cpuid];
+
+ s_ack = m_ack;
+
+ if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
+ head = &this_domain->p_link;
+ else
+ head = __ipipe_pipeline.next;
+
+ /* Ack the interrupt. */
+
+ pos = head;
+
+ while (pos != &__ipipe_pipeline) {
+ struct ipipe_domain *next_domain =
+ list_entry(pos, struct ipipe_domain, p_link);
+
+ /*
+ * For each domain handling the incoming IRQ, mark it as
+ * pending in its log.
+ */
+ if (test_bit(IPIPE_HANDLE_FLAG,
+ &next_domain->irqs[irq].control)) {
+ /*
+ * Domains that handle this IRQ are polled for
+ * acknowledging it in decreasing priority order. The
+ * interrupt must be made pending _first_ in the
+ * domain's status flags before the PIC is unlocked.
+ */
+
+ next_domain->cpudata[cpuid].irq_hits[irq]++;
+ __ipipe_set_irq_bit(next_domain, cpuid, irq);
+ ipipe_mark_irq_receipt(next_domain, irq, cpuid);
+
+ /*
+ * Always get the first master acknowledge available.
+ * Once we've got it, allow slave acknowledge
+ * handlers to run (until one of them stops us).
+ */
+ if (next_domain->irqs[irq].acknowledge != NULL) {
+ if (!m_ack)
+ m_ack = next_domain->irqs[irq].acknowledge(irq);
+ else if (test_bit(IPIPE_SHARED_FLAG,
+ &next_domain->irqs[irq].control) && !s_ack)
+ s_ack = next_domain->irqs[irq].acknowledge(irq);
+ }
+ }
+
+ /*
+ * If the domain does not want the IRQ to be passed down the
+ * interrupt pipe, exit the loop now.
+ */
+
+ if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+ break;
+
+ pos = next_domain->p_link.next;
+ }
+
+ /*
+ * Now walk the pipeline, yielding control to the highest
+ * priority domain that has pending interrupt(s) or
+ * immediately to the current domain if the interrupt has been
+ * marked as 'sticky'. This search does not go beyond the
+ * current domain in the pipeline.
+ */
+
+ __ipipe_walk_pipeline(head, cpuid);
+}
+
+asmlinkage int __ipipe_grab_irq(struct pt_regs *regs)
+{
+ extern int ppc_spurious_interrupts;
+ ipipe_declare_cpuid;
+ int irq;
+
+ irq = ppc_md.get_irq(regs);
+ /* handle I-pipe IPIs */
+ if (irq >= 0) {
+#ifdef CONFIG_SMP
+ /* check for cascaded I-pipe IPIs */
+ if (irq == __ipipe_ipi_irq) {
+ irq_desc_t *desc = get_irq_desc(irq);
+
+ if (desc->handler && desc->handler->ack)
+ desc->handler->ack(irq);
+ __ipipe_ipi_action(irq,NULL,regs);
+ desc->handler->end(irq);
+ }
+ else
+#endif /* CONFIG_SMP */
+ __ipipe_handle_irq(irq, regs);
+ }
+ else
+ ppc_spurious_interrupts++;
+
+ ipipe_load_cpuid();
+
+ return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
+ !test_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status));
+}
+
+void __ipipe_do_IRQ(int irq, struct pt_regs *regs)
+{
+ irq_enter();
+ ppc_irq_dispatch_handler(regs, irq);
+ irq_exit();
+}
+
+asmlinkage int __ipipe_grab_timer(struct pt_regs *regs)
+{
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid();
+ /* On 970 CPUs DEC cannot be disabled, and without setting DEC
+ * here, DEC interrupt would be triggered as soon as interrupts
+ * are enabled in __ipipe_sync_stage
+ */
+ set_dec(0x7fffffff);
+
+ __ipipe_tick_regs[cpuid].msr = regs->msr; /* for do_timer() */
+
+ __ipipe_handle_irq(IPIPE_TIMER_VIRQ, regs);
+
+ ipipe_load_cpuid();
+
+#ifndef CONFIG_40x
+ if (__ipipe_decr_ticks != tb_ticks_per_jiffy) {
+ unsigned long long next_date, now;
+
+ next_date = __ipipe_decr_next[cpuid];
+
+ while ((now = __ipipe_read_timebase()) >= next_date)
+ next_date += __ipipe_decr_ticks;
+
+ set_dec(next_date - now);
+
+ __ipipe_decr_next[cpuid] = next_date;
+ }
+#endif /* !CONFIG_40x */
+
+ return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
+ !test_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status));
+}
+
+void __ipipe_do_timer(int irq, struct pt_regs *regs)
+{
+ timer_interrupt(regs);
+}
+
+/* IPI stuff */
+#ifdef CONFIG_SMP
+void __ipipe_register_ipi(unsigned int irq) {
+ DBG("__ipipe_register_IPI: %u\n", irq);
+ __ipipe_ipi_irq = irq;
+ mb();
+}
+
+irqreturn_t
+__ipipe_ipi_action(int irq, void *dev_id, struct pt_regs *regs) {
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid();
+
+ DBG("__ipipe_ipi_action: irq=%d dev_id=%p cpu=%u\n",
+ irq, dev_id, cpuid);
+ while (ipipe_ipi_message[cpuid].value & IPIPE_MSG_IPI_MASK) {
+ if (test_and_clear_bit(IPIPE_MSG_CRITICAL_IPI,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_CRITICAL_IPI, regs);
+ }
+ if (test_and_clear_bit(IPIPE_MSG_SERVICE_IPI0,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_SERVICE_IPI0, regs);
+ }
+ if (test_and_clear_bit(IPIPE_MSG_SERVICE_IPI1,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_SERVICE_IPI1, regs);
+ }
+ if (test_and_clear_bit(IPIPE_MSG_SERVICE_IPI2,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_SERVICE_IPI2, regs);
+ }
+ if (test_and_clear_bit(IPIPE_MSG_SERVICE_IPI3,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_SERVICE_IPI3, regs);
+ }
+ if (test_and_clear_bit(IPIPE_MSG_SERVICE_IPI4,
+ &ipipe_ipi_message[cpuid].value)) {
+ mb();
+ __ipipe_handle_irq(IPIPE_SERVICE_IPI4, regs);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SMP */
+
+asmlinkage int __ipipe_check_root(struct pt_regs *regs)
+{
+ ipipe_declare_cpuid;
+ /*
+ * This routine is called with hw interrupts off, so no migration
+ * can occur while checking the identity of the current domain.
+ */
+ ipipe_load_cpuid();
+ return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
+ !test_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status));
+}
+
+asmlinkage void __ipipe_stall_root_raw(void)
+{
+ ipipe_declare_cpuid;
+
+ ipipe_load_cpuid(); /* hw IRQs are off on entry. */
+
+ __set_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status);
+
+ ipipe_mark_domain_stall(ipipe_root_domain, cpuid);
+
+ local_irq_enable_hw();
+}
+
+asmlinkage void __ipipe_unstall_root_raw(void)
+{
+ ipipe_declare_cpuid;
+
+ local_irq_disable_hw();
+
+ ipipe_load_cpuid();
+
+ __clear_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status);
+
+ ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
+}
+
+int __ipipe_syscall_root(struct pt_regs *regs)
+{
+ ipipe_declare_cpuid;
+ unsigned long flags;
+
+ /*
+ * This routine either returns:
+ * 0 -- if the syscall is to be passed to Linux;
+ * >0 -- if the syscall should not be passed to Linux, and no
+ * tail work should be performed;
+ * <0 -- if the syscall should not be passed to Linux but the
+ * tail work has to be performed (for handling signals etc).
+ */
+
+ if (__ipipe_event_monitors[IPIPE_EVENT_SYSCALL] > 0 &&
+ __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL,regs) > 0) {
+ /*
+ * We might enter here over a non-root domain and exit
+ * over the root one as a result of the syscall
+ * (i.e. by recycling the register set of the current
+ * context across the migration), so we need to fixup
+ * the interrupt flag upon return too, so that
+ * __ipipe_unstall_iret_root() resets the correct
+ * stall bit on exit.
+ */
+ if (ipipe_current_domain == ipipe_root_domain && !in_atomic()) {
+ /*
+ * Sync pending VIRQs before _TIF_NEED_RESCHED
+ * is tested.
+ */
+ ipipe_lock_cpu(flags);
+ if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi &
IPIPE_IRQMASK_VIRT) != 0)
+ __ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
+ ipipe_unlock_cpu(flags);
+ return -1;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(__switch_to);
+EXPORT_SYMBOL(show_stack);
+EXPORT_SYMBOL(_switch);
+EXPORT_SYMBOL(switch_slb);
+EXPORT_SYMBOL(switch_stab);
+EXPORT_SYMBOL(__flush_tlb_pending);
+EXPORT_PER_CPU_SYMBOL(ppc64_tlb_batch);
+#ifndef CONFIG_SMP
+EXPORT_SYMBOL(last_task_used_math);
+#endif
+EXPORT_SYMBOL(disarm_decr);
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+
+
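
Note that __ipipe_grab_irq() and __ipipe_grab_timer() return nonzero
only when the root domain is current and unstalled; the assembly glue
(__ipipe_ret_from_except_lite in entry.S) branches on that value.
Paraphrased in C (a reading of the assembly, not code from the patch):

	if (__ipipe_grab_timer(regs))
		ret_from_except_lite();	/* root may run the Linux epilogue */
	else
		restore();		/* skip Linux's exception tail work */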
diff -Nru linux-2.6.14/arch/ppc64/kernel/irq.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/irq.c
--- linux-2.6.14/arch/ppc64/kernel/irq.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/irq.c 2005-11-04 10:51:48.000000000 +0200
@@ -165,14 +165,18 @@
if (desc->status & IRQ_PER_CPU) {
/* no locking required for CPU-local interrupts: */
+#ifndef CONFIG_IPIPE
ack_irq(irq);
+#endif /* CONFIG_IPIPE */
action_ret = handle_IRQ_event(irq, regs, desc->action);
desc->handler->end(irq);
return;
}
spin_lock(&desc->lock);
+#ifndef CONFIG_IPIPE
ack_irq(irq);
+#endif /* CONFIG_IPIPE */
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
diff -Nru linux-2.6.14/arch/ppc64/kernel/Makefile linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/Makefile
--- linux-2.6.14/arch/ppc64/kernel/Makefile 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/Makefile 2005-11-04 08:57:36.000000000 +0200
@@ -75,6 +75,7 @@
endif
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
+obj-$(CONFIG_IPIPE) += ipipe-core.o ipipe-root.o
obj-$(CONFIG_KPROBES) += kprobes.o
CFLAGS_ioctl32.o += -Ifs/
diff -Nru linux-2.6.14/arch/ppc64/kernel/mpic.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/mpic.c
--- linux-2.6.14/arch/ppc64/kernel/mpic.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/mpic.c 2005-11-12 10:27:32.000000000 +0200
@@ -44,7 +44,6 @@
static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);
-
/*
* Register accessor functions
*/
@@ -867,6 +866,11 @@
#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
+#ifdef CONFIG_IPIPE
+ extern void __ipipe_register_ipi(unsigned int irq);
+ extern irqreturn_t __ipipe_ipi_action(int irq,
+ void *dev_id, struct pt_regs *regs);
+#endif
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
@@ -878,8 +882,14 @@
"IPI0 (call function)", mpic);
request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
"IPI1 (reschedule)", mpic);
+#ifdef CONFIG_IPIPE
+ request_irq(mpic->ipi_offset+2, __ipipe_ipi_action, SA_INTERRUPT,
+ "IPI2 (I-pipe one-for-all)", mpic);
+ __ipipe_register_ipi(mpic->ipi_offset+2);
+#else
request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
"IPI2 (unused)", mpic);
+#endif /* CONFIG_IPIPE */
request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
"IPI3 (debugger break)", mpic);
diff -Nru linux-2.6.14/arch/ppc64/kernel/time.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/time.c
--- linux-2.6.14/arch/ppc64/kernel/time.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/time.c 2005-11-05 10:36:42.000000000 +0200
@@ -72,6 +72,9 @@
EXPORT_SYMBOL(jiffies_64);
+#ifdef CONFIG_IPIPE
+unsigned long disarm_decr[NR_CPUS];
+#endif /* CONFIG_IPIPE */
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
extern int piranha_simulator;
@@ -363,6 +366,9 @@
next_dec = lpaca->next_jiffy_update_tb - cur_tb;
if (next_dec > lpaca->default_decr)
next_dec = lpaca->default_decr;
+#ifdef CONFIG_IPIPE
+ if (!disarm_decr[smp_processor_id()])
+#endif /* CONFIG_IPIPE */
set_dec(next_dec);
#ifdef CONFIG_PPC_ISERIES
diff -Nru linux-2.6.14/arch/ppc64/kernel/traps.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/traps.c
--- linux-2.6.14/arch/ppc64/kernel/traps.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/kernel/traps.c 2005-11-04 21:06:22.000000000 +0200
@@ -174,6 +174,9 @@
if (ppc_md.system_reset_exception)
ppc_md.system_reset_exception(regs);
+ if (ipipe_trap_notify(IPIPE_TRAP_SYSRESET, regs))
+ return;
+
die("System Reset", regs, 0);
/* Must die if the interrupt is not recoverable */
@@ -191,6 +194,9 @@
if (ppc_md.machine_check_exception)
recover = ppc_md.machine_check_exception(regs);
+ if (ipipe_trap_notify(IPIPE_TRAP_MCE, regs))
+ return;
+
if (recover)
return;
@@ -205,6 +211,9 @@
void unknown_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_UNKNOWN, regs))
+ return;
+
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
@@ -213,6 +222,9 @@
void instruction_breakpoint_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_IABR, regs))
+ return;
+
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
return;
@@ -225,6 +237,9 @@
{
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
+ if (ipipe_trap_notify(IPIPE_TRAP_SSTEP, regs))
+ return;
+
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
return;
@@ -401,6 +416,9 @@
void __kprobes program_check_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_PCE, regs))
+ return;
+
if (debugger_fault_handler(regs))
return;
@@ -448,6 +466,9 @@
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_KFPUNAVAIL, regs))
+ return;
+
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
@@ -455,6 +476,9 @@
void altivec_unavailable_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_ALTUNAVAIL, regs))
+ return;
+
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
@@ -470,6 +494,9 @@
void performance_monitor_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_PERFMON, regs))
+ return;
+
perf_irq(regs);
}
@@ -484,6 +511,9 @@
emulate_single_step(regs);
return;
}
+
+ if (ipipe_trap_notify(IPIPE_TRAP_ALIGNMENT, regs))
+ return;
/* Operand address was bad */
if (fixed == -EFAULT) {
@@ -506,6 +536,9 @@
int err;
siginfo_t info;
+ if (ipipe_trap_notify(IPIPE_TRAP_ALTASSIST, regs))
+ return;
+
if (!user_mode(regs)) {
printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
" at %lx\n", regs->nip);
@@ -547,6 +580,9 @@
*/
void unrecoverable_exception(struct pt_regs *regs)
{
+ if (ipipe_trap_notify(IPIPE_TRAP_NREC, regs))
+ return;
+
printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
regs->trap, regs->nip);
die("Unrecoverable exception", regs, SIGABRT);
diff -Nru linux-2.6.14/arch/ppc64/mm/fault.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/fault.c
--- linux-2.6.14/arch/ppc64/mm/fault.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/fault.c 2005-11-04 11:12:44.000000000 +0200
@@ -121,6 +121,9 @@
BUG_ON((trap == 0x380) || (trap == 0x480));
+ if (ipipe_trap_notify(IPIPE_TRAP_ACCESS, regs))
+ return 0;
+
if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return 0;
diff -Nru linux-2.6.14/arch/ppc64/mm/hash_native.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/hash_native.c
--- linux-2.6.14/arch/ppc64/mm/hash_native.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/hash_native.c 2005-11-10 09:24:21.000000000 +0200
@@ -264,7 +264,7 @@
if (large)
avpn &= ~1;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
native_lock_hpte(hptep);
hpte_v = hptep->v;
@@ -288,7 +288,7 @@
if (lock_tlbie)
spin_unlock(&native_tlbie_lock);
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/*
@@ -345,7 +345,7 @@
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long large = batch->large;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
j = 0;
for (i = 0; i < number; i++) {
@@ -413,7 +413,7 @@
spin_unlock(&native_tlbie_lock);
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#ifdef CONFIG_PPC_PSERIES
diff -Nru linux-2.6.14/arch/ppc64/mm/tlb.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/tlb.c
--- linux-2.6.14/arch/ppc64/mm/tlb.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/arch/ppc64/mm/tlb.c 2005-11-04 12:36:17.000000000 +0200
@@ -168,7 +168,11 @@
cpumask_t tmp;
int local = 0;
+#ifdef CONFIG_IPIPE
+ BUG_ON(ipipe_current_domain == ipipe_root_domain && in_interrupt());
+#else /* !CONFIG_IPIPE */
BUG_ON(in_interrupt());
+#endif /* CONFIG_IPIPE */
cpu = get_cpu();
i = batch->index;
diff -Nru linux-2.6.14/include/asm-ppc64/hw_irq.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/hw_irq.h
--- linux-2.6.14/include/asm-ppc64/hw_irq.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/hw_irq.h 2005-11-04 11:32:23.000000000 +0200
@@ -19,6 +19,81 @@
int timer_interrupt(struct pt_regs *);
extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
+#ifdef CONFIG_IPIPE
+
+#ifdef CONFIG_PPC_ISERIES
+#error "iSeries machines not supported by adeos i-pipe."
+#endif
+
+void __ipipe_stall_root(void);
+void __ipipe_unstall_root(void);
+unsigned long __ipipe_test_root(void);
+unsigned long __ipipe_test_and_stall_root(void);
+void __ipipe_restore_root(unsigned long flags);
+
+#define irqs_disabled() __ipipe_test_root()
+
+static inline void local_irq_disable(void) {
+ __ipipe_stall_root();
+}
+
+static inline void local_irq_enable(void) {
+ __ipipe_unstall_root();
+}
+
+static inline void local_irq_save_ptr(unsigned long *flags) {
+ *flags = (!__ipipe_test_and_stall_root()) << 15; /*XXX*/
+}
+
+static inline void local_irq_restore(unsigned long flags) {
+ __ipipe_restore_root(!(flags & MSR_EE)); /*XXX*/
+}
+
+/* XXX */
+#define local_save_flags(flags) ((flags) = (!__ipipe_test_root()) << 15)
+#define local_irq_save(flags) local_irq_save_ptr(&flags)
+
+static inline void local_irq_disable_hw(void)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ __mtmsrd(msr & ~MSR_EE, 1);
+ __asm__ __volatile__("": : :"memory");
+}
+
+static inline void local_irq_enable_hw(void)
+{
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ __mtmsrd(msr | MSR_EE, 1);
+}
+
+static inline void local_irq_save_ptr_hw(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+ __mtmsrd(msr & ~MSR_EE, 1);
+ __asm__ __volatile__("": : :"memory");
+}
+
+#define local_save_flags_hw(flags) ((flags) = mfmsr())
+#define local_irq_save_hw(flags) local_irq_save_ptr_hw(&flags)
+#define local_irq_restore_hw(flags) do { \
+ __asm__ __volatile__("": : :"memory"); \
+ __mtmsrd((flags), 1); \
+} while(0)
+#define local_test_iflag_hw(x) ((x) & MSR_EE)
+#define irqs_disabled_hw() \
+({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ !(flags & MSR_EE); \
+})
+
+#else /* !CONFIG_IPIPE */
+
#ifdef CONFIG_PPC_ISERIES
extern unsigned long local_get_flags(void);
@@ -75,6 +150,13 @@
#endif /* CONFIG_PPC_ISERIES */
+#define local_irq_save_hw(flags) local_irq_save(flags)
+#define local_irq_restore_hw(flags) local_irq_restore(flags)
+#define local_irq_enable_hw() local_irq_enable()
+#define local_irq_disable_hw() local_irq_disable()
+
+#endif /* CONFIG_IPIPE */
+
#define mask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
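
To make the split above concrete: with CONFIG_IPIPE the plain
local_irq_*() operations only stall the root stage (incoming interrupts
are logged, not masked), while the *_hw() variants really toggle MSR.EE.
A minimal sketch of nesting the two, assuming kernel context:

	unsigned long vflags, hwflags;

	local_irq_save(vflags);		/* virtual: stalls the root stage */
	/* hw interrupts can still be taken and logged here */

	local_irq_save_hw(hwflags);	/* real: clears MSR.EE */
	/* truly atomic section */
	local_irq_restore_hw(hwflags);

	local_irq_restore(vflags);	/* may sync the pending IRQ log */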
diff -Nru linux-2.6.14/include/asm-ppc64/ipipe.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/ipipe.h
--- linux-2.6.14/include/asm-ppc64/ipipe.h 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/ipipe.h 2005-12-06 12:14:33.000000000 +0200
@@ -0,0 +1,203 @@
+/*
+ * include/asm-ppc64/ipipe.h
+ *
+ * I-pipe 64-bit PowerPC port
+ * Copyright (C) 2005 Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __PPC64_IPIPE_H
+#define __PPC64_IPIPE_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_IPIPE
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/paca.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+
+#define IPIPE_ARCH_STRING "0.9-02"
+#define IPIPE_MAJOR_NUMBER 0
+#define IPIPE_MINOR_NUMBER 9
+#define IPIPE_PATCH_NUMBER 2
+
+#define IPIPE_NR_XIRQS NR_IRQS
+#define IPIPE_IRQ_ISHIFT 6 /* 64-bit arch. */
+
+/*
+ * The first virtual interrupt is reserved for the timer (see
+ * __ipipe_init_platform).
+ */
+#define IPIPE_TIMER_VIRQ IPIPE_VIRQ_BASE
+
+#ifdef CONFIG_SMP
+/*
+ * These are virtual IPI numbers. The OpenPIC supports only 4 IPIs and
+ * IPIs 0x0,0x1,0x3 are already used by Linux. The virtualization layer is
+ * implemented by using the free IPI 0x2 and cascading it in ipipe-root.c.
+ */
+/* these are bit numbers in practice */
+#define IPIPE_MSG_CRITICAL_IPI 0x0
+#define IPIPE_MSG_SERVICE_IPI0 (IPIPE_MSG_CRITICAL_IPI + 1)
+#define IPIPE_MSG_SERVICE_IPI1 (IPIPE_MSG_CRITICAL_IPI + 2)
+#define IPIPE_MSG_SERVICE_IPI2 (IPIPE_MSG_CRITICAL_IPI + 3)
+#define IPIPE_MSG_SERVICE_IPI3 (IPIPE_MSG_CRITICAL_IPI + 4)
+#define IPIPE_MSG_SERVICE_IPI4 (IPIPE_MSG_CRITICAL_IPI + 5)
+
+#define IPIPE_MSG_IPI_MASK ((1 << IPIPE_MSG_CRITICAL_IPI) | \
+ (1 << IPIPE_MSG_SERVICE_IPI0) | \
+ (1 << IPIPE_MSG_SERVICE_IPI1) | \
+ (1 << IPIPE_MSG_SERVICE_IPI2) | \
+ (1 << IPIPE_MSG_SERVICE_IPI3) | \
+ (1 << IPIPE_MSG_SERVICE_IPI4))
+
+#define IPIPE_CRITICAL_IPI (IPIPE_VIRQ_BASE + 1)
+#define IPIPE_SERVICE_IPI0 (IPIPE_CRITICAL_IPI + 1)
+#define IPIPE_SERVICE_IPI1 (IPIPE_CRITICAL_IPI + 2)
+#define IPIPE_SERVICE_IPI2 (IPIPE_CRITICAL_IPI + 3)
+#define IPIPE_SERVICE_IPI3 (IPIPE_CRITICAL_IPI + 4)
+#define IPIPE_SERVICE_IPI4 (IPIPE_CRITICAL_IPI + 5)
+
+#define IPIPE_MSG_IPI_OFFSET (IPIPE_CRITICAL_IPI)
+
+#define ipipe_processor_id() (get_paca()->paca_index)
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id() 0
+#endif /* CONFIG_SMP */
+
+#define prepare_arch_switch(next) \
+do { \
+ __ipipe_dispatch_event(IPIPE_EVENT_SCHEDULE,next); \
+ local_irq_disable_hw(); \
+} while(0)
+
+#define task_hijacked(p) \
+ ( { \
+ int x = ipipe_current_domain != ipipe_root_domain; \
+ __clear_bit(IPIPE_SYNC_FLAG, \
+ &ipipe_root_domain->cpudata[task_cpu(p)].status); \
+ local_irq_enable_hw(); x; \
+ } )
+
+/* traps */
+#define IPIPE_TRAP_ACCESS 0 /* Data or instruction access exception */
+#define IPIPE_TRAP_ALIGNMENT 1 /* Alignment exception */
+#define IPIPE_TRAP_ALTUNAVAIL 2 /* Altivec unavailable */
+#define IPIPE_TRAP_PCE 3 /* Program check exception */
+#define IPIPE_TRAP_MCE 4 /* Machine check exception */
+#define IPIPE_TRAP_UNKNOWN 5 /* Unknown exception */
+#define IPIPE_TRAP_IABR 6 /* Instruction breakpoint */
+#define IPIPE_TRAP_SSTEP 7 /* Single-step exception */
+#define IPIPE_TRAP_NREC 8 /* Non-recoverable exception */
+#define IPIPE_TRAP_ALTASSIST 9 /* Altivec assist exception */
+#define IPIPE_TRAP_SYSRESET 10 /* System reset exception */
+#define IPIPE_TRAP_KFPUNAVAIL 11 /* Kernel FP Unavailable exception */
+#define IPIPE_TRAP_PERFMON 12 /* Performance Monitor exception */
+#define IPIPE_NR_FAULTS 13
+/* Pseudo-vectors used for kernel events */
+#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
+#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 4)
+#define IPIPE_LAST_EVENT IPIPE_EVENT_EXIT
+#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1)
+
+struct ipipe_domain;
+
+struct ipipe_sysinfo {
+
+ int ncpus; /* Number of CPUs on board */
+ u64 cpufreq; /* CPU frequency (in Hz) */
+
+ /* Arch-dependent block */
+
+ struct {
+ unsigned tmirq; /* Decrementer virtual IRQ */
+ u64 tmfreq; /* Timebase frequency */
+ } archdep;
+};
+
+#define ipipe_read_tsc(t) (t = mftb())
+
+#define __ipipe_read_timebase() \
+ ({ \
+ unsigned long long t; \
+ ipipe_read_tsc(t); \
+ t; \
+ })
+
+extern unsigned long tb_ticks_per_jiffy;
+#define ipipe_cpu_freq() (HZ * tb_ticks_per_jiffy)
+#define ipipe_tsc2ns(t) (((t) * 1000) / (ipipe_cpu_freq() / 1000000))
+
+/* Private interface -- Internal use only */
+
+#ifdef CONFIG_SMP
+struct ipipe_ipi_struct {
+ volatile unsigned long value;
+} ____cacheline_aligned;
+#endif /* CONFIG_SMP */
+
+#define __ipipe_check_platform() do { } while(0)
+
+void __ipipe_init_platform(void);
+
+void __ipipe_enable_pipeline(void);
+
+void __ipipe_sync_stage(unsigned long syncmask);
+
+int __ipipe_ack_irq(unsigned irq);
+
+int __ipipe_ack_system_irq(unsigned irq);
+
+void __ipipe_do_IRQ(int irq,
+ struct pt_regs *regs);
+
+void __ipipe_do_timer(int irq,
+ struct pt_regs *regs);
+
+void __ipipe_do_critical_sync(unsigned irq);
+
+extern unsigned long __ipipe_decr_ticks;
+
+extern unsigned long long __ipipe_decr_next[];
+
+extern struct pt_regs __ipipe_tick_regs[];
+
+void __ipipe_handle_irq(int irq,
+ struct pt_regs *regs);
+
+#define __ipipe_tick_irq IPIPE_TIMER_VIRQ
+
+#else /* !CONFIG_IPIPE */
+
+#define task_hijacked(p) 0
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__PPC64_IPIPE_H */
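
The timebase helpers above compose as expected; for instance, a rough
latency probe could read (names and message are made up):

	struct ipipe_sysinfo info;
	unsigned long long t0, t1;

	ipipe_get_sysinfo(&info);	/* info.archdep.tmirq == IPIPE_TIMER_VIRQ */

	ipipe_read_tsc(t0);
	/* ...measured section... */
	ipipe_read_tsc(t1);
	printk("elapsed: %llu ns\n", ipipe_tsc2ns(t1 - t0));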
diff -Nru linux-2.6.14/include/asm-ppc64/mmu_context.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/mmu_context.h
--- linux-2.6.14/include/asm-ppc64/mmu_context.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/mmu_context.h 2005-11-10 09:25:00.000000000 +0200
@@ -79,9 +79,9 @@
{
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
switch_mm(prev, next, current);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* __PPC64_MMU_CONTEXT_H */
diff -Nru linux-2.6.14/include/asm-ppc64/pgalloc.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/pgalloc.h
--- linux-2.6.14/include/asm-ppc64/pgalloc.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/pgalloc.h 2005-11-04 23:45:42.000000000 +0200
@@ -117,4 +117,9 @@
#define check_pgt_cache() do { } while (0)
+static inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ /* nop */
+}
+
#endif /* _PPC64_PGALLOC_H */
diff -Nru linux-2.6.14/include/asm-ppc64/time.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/time.h
--- linux-2.6.14/include/asm-ppc64/time.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/asm-ppc64/time.h 2005-11-04 12:37:44.000000000 +0200
@@ -23,6 +23,9 @@
#include <asm/iSeries/HvCall.h>
/* time.c */
+#ifdef CONFIG_IPIPE
+extern unsigned long disarm_decr[NR_CPUS];
+#endif /* CONFIG_IPIPE */
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
diff -Nru linux-2.6.14/include/linux/hardirq.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/hardirq.h
--- linux-2.6.14/include/linux/hardirq.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/hardirq.h 2005-11-04 12:08:17.000000000 +0200
@@ -87,8 +87,21 @@
# define synchronize_irq(irq) barrier()
#endif
+#ifdef CONFIG_IPIPE
+#define nmi_enter() \
+do { \
+ if (ipipe_current_domain == ipipe_root_domain) \
+ irq_enter(); \
+} while(0)
+#define nmi_exit() \
+do { \
+ if (ipipe_current_domain == ipipe_root_domain) \
+ sub_preempt_count(HARDIRQ_OFFSET); \
+} while(0)
+#else /* !CONFIG_IPIPE */
#define nmi_enter() irq_enter()
#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
+#endif /* CONFIG_IPIPE */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_user_vtime(struct task_struct *tsk)
diff -Nru linux-2.6.14/include/linux/ipipe.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/ipipe.h
--- linux-2.6.14/include/linux/ipipe.h 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/ipipe.h 2005-12-06 12:25:37.000000000 +0200
@@ -0,0 +1,757 @@
+/* -*- linux-c -*-
+ * include/linux/ipipe.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_H
+#define __LINUX_IPIPE_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <asm/ipipe.h>
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_VERSION_STRING IPIPE_ARCH_STRING
+#define IPIPE_RELEASE_NUMBER ((IPIPE_MAJOR_NUMBER << 16) | \
+ (IPIPE_MINOR_NUMBER << 8) | \
+ (IPIPE_PATCH_NUMBER))
+
+#define IPIPE_ROOT_PRIO 100
+#define IPIPE_ROOT_ID 0
+#define IPIPE_ROOT_NPTDKEYS 4 /* Must be <= BITS_PER_LONG */
+
+#define IPIPE_RESET_TIMER 0x1
+#define IPIPE_GRAB_TIMER 0x2
+#define IPIPE_SAME_HANDLER ((void (*)(unsigned))(-1))
+
+/* Global domain flags */
+#define IPIPE_SPRINTK_FLAG 0 /* Synchronous printk() allowed */
+
+#define IPIPE_STALL_FLAG 0 /* Stalls a pipeline stage */
+#define IPIPE_SYNC_FLAG 1 /* The interrupt syncer is running for the domain */
+
+#define IPIPE_HANDLE_FLAG 0
+#define IPIPE_PASS_FLAG 1
+#define IPIPE_ENABLE_FLAG 2
+#define IPIPE_DYNAMIC_FLAG IPIPE_HANDLE_FLAG
+#define IPIPE_STICKY_FLAG 3
+#define IPIPE_SYSTEM_FLAG 4
+#define IPIPE_LOCK_FLAG 5
+#define IPIPE_SHARED_FLAG 6
+#define IPIPE_EXCLUSIVE_FLAG 31 /* ipipe_catch_event() is the reason why. */
+
+#define IPIPE_HANDLE_MASK (1 << IPIPE_HANDLE_FLAG)
+#define IPIPE_PASS_MASK (1 << IPIPE_PASS_FLAG)
+#define IPIPE_ENABLE_MASK (1 << IPIPE_ENABLE_FLAG)
+#define IPIPE_DYNAMIC_MASK IPIPE_HANDLE_MASK
+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
+#define IPIPE_STICKY_MASK (1 << IPIPE_STICKY_FLAG)
+#define IPIPE_SYSTEM_MASK (1 << IPIPE_SYSTEM_FLAG)
+#define IPIPE_LOCK_MASK (1 << IPIPE_LOCK_FLAG)
+#define IPIPE_SHARED_MASK (1 << IPIPE_SHARED_FLAG)
+#define IPIPE_SYNC_MASK (1 << IPIPE_SYNC_FLAG)
+
+#define IPIPE_DEFAULT_MASK (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
+#define IPIPE_STDROOT_MASK (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_SYSTEM_MASK)
+
+/* Number of virtual IRQs */
+#define IPIPE_NR_VIRQS BITS_PER_LONG
+/* First virtual IRQ # */
+#define IPIPE_VIRQ_BASE (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG) * BITS_PER_LONG)
+/* Total number of IRQ slots */
+#define IPIPE_NR_IRQS (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
+/* Number of indirect words needed to map the whole IRQ space. */
+#define IPIPE_IRQ_IWORDS ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define IPIPE_IRQ_IMASK (BITS_PER_LONG - 1)
+#define IPIPE_IRQMASK_ANY (~0L)
+#define IPIPE_IRQMASK_VIRT (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / BITS_PER_LONG))
+
+#ifdef CONFIG_SMP
+
+#define IPIPE_NR_CPUS NR_CPUS
+#define ipipe_declare_cpuid int cpuid
+#define ipipe_load_cpuid() do { \
+ (cpuid) = ipipe_processor_id(); \
+ } while(0)
+#define ipipe_lock_cpu(flags) do { \
+ local_irq_save_hw(flags); \
+ (cpuid) = ipipe_processor_id(); \
+ } while(0)
+#define ipipe_unlock_cpu(flags) local_irq_restore_hw(flags)
+#define ipipe_get_cpu(flags) ipipe_lock_cpu(flags)
+#define ipipe_put_cpu(flags) ipipe_unlock_cpu(flags)
+#define ipipe_current_domain (ipipe_percpu_domain[ipipe_processor_id()])
+
+#else /* !CONFIG_SMP */
+
+#define IPIPE_NR_CPUS 1
+#define ipipe_declare_cpuid const int cpuid = 0
+#define ipipe_load_cpuid() do { } while(0)
+#define ipipe_lock_cpu(flags) local_irq_save_hw(flags)
+#define ipipe_unlock_cpu(flags) local_irq_restore_hw(flags)
+#define ipipe_get_cpu(flags) do { flags = 0; } while(0)
+#define ipipe_put_cpu(flags) do { } while(0)
+#define ipipe_current_domain (ipipe_percpu_domain[0])
+
+#endif /* CONFIG_SMP */
+
+#define ipipe_virtual_irq_p(irq) ((irq) >= IPIPE_VIRQ_BASE && \
+ (irq) < IPIPE_NR_IRQS)
+
+struct ipipe_domain {
+
+ struct list_head p_link; /* Link in pipeline */
+
+ struct ipcpudata {
+ unsigned long status;
+ unsigned long irq_pending_hi;
+ unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
+ unsigned long irq_hits[IPIPE_NR_IRQS];
+ } cpudata[IPIPE_NR_CPUS];
+
+ struct {
+ int (*acknowledge) (unsigned irq);
+ void (*handler) (unsigned irq);
+ unsigned long control;
+ } irqs[IPIPE_NR_IRQS];
+
+ int (*evhand[IPIPE_NR_EVENTS])(unsigned event,
+ struct ipipe_domain *from,
+ void *data); /* Event handlers. */
+ unsigned long evexcl; /* Exclusive event bits. */
+
+#ifdef CONFIG_IPIPE_STATS
+ struct ipipe_stats { /* All in timebase units. */
+ unsigned long long last_stall_date;
+ unsigned long last_stall_eip;
+ unsigned long max_stall_time;
+ unsigned long max_stall_eip;
+ struct ipipe_irq_stats {
+ unsigned long long last_receipt_date;
+ unsigned long max_delivery_time;
+ } irq_stats[IPIPE_NR_IRQS];
+ } stats[IPIPE_NR_CPUS];
+#endif /* CONFIG_IPIPE_STATS */
+ unsigned long flags;
+ unsigned domid;
+ const char *name;
+ int priority;
+ void *pdd;
+};
+
+struct ipipe_domain_attr {
+
+ unsigned domid; /* Domain identifier -- Magic value set by caller */
+ const char *name; /* Domain name -- Warning: won't be dup'ed! */
+ int priority; /* Priority in interrupt pipeline */
+ void (*entry) (void); /* Domain entry point */
+ void *pdd; /* Per-domain (opaque) data pointer */
+};
+
+/* The following macros must be used hw interrupts off. */
+
+#define __ipipe_set_irq_bit(ipd,cpuid,irq) \
+do { \
+ if (!test_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) { \
+ __set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+ __set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
+ } \
+} while(0)
+
+#define __ipipe_clear_pend(ipd,cpuid,irq) \
+do { \
+ __clear_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+ if ((ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
+ __clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
+} while(0)
+
+#define __ipipe_lock_irq(ipd,cpuid,irq) \
+do { \
+ if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
+ __ipipe_clear_pend(ipd,cpuid,irq); \
+} while(0)
+
+#define __ipipe_unlock_irq(ipd,irq) \
+do { \
+ int __cpuid, __nr_cpus = num_online_cpus(); \
+ if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
+ for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) \
+ if ((ipd)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic ops next. */ \
+ set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+ set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[__cpuid].irq_pending_hi); \
+ } \
+} while(0)
+
+#define __ipipe_clear_irq(ipd,irq) \
+do { \
+ int __cpuid, __nr_cpus = num_online_cpus(); \
+ clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control); \
+ for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) { \
+ (ipd)->cpudata[__cpuid].irq_hits[irq] = 0; \
+ __ipipe_clear_pend(ipd,__cpuid,irq); \
+ } \
+} while(0)
+
+#ifdef __RAW_SPIN_LOCK_UNLOCKED
+#define spin_lock_hw(x) _raw_spin_lock(x)
+#define spin_trylock_hw(x) _raw_spin_trylock(x)
+#define spin_unlock_hw(x) _raw_spin_unlock(x)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define write_lock_hw(x) _raw_write_lock(x)
+#define write_trylock_hw(x) _raw_write_trylock(x)
+#define write_unlock_hw(x) _raw_write_unlock(x)
+#define read_lock_hw(x) _raw_read_lock(x)
+#define read_trylock_hw(x) _raw_read_trylock(x)
+#define read_unlock_hw(x) _raw_read_unlock(x)
+#else /* UP non-debug */
+#define write_lock_hw(lock) do { (void)(lock); } while (0)
+#define write_trylock_hw(lock) ({ (void)(lock); 1; })
+#define write_unlock_hw(lock) do { (void)(lock); } while (0)
+#define read_lock_hw(lock) do { (void)(lock); } while (0)
+#define read_trylock_hw(lock) ({ (void)(lock); 1; })
+#define read_unlock_hw(lock) do { (void)(lock); } while (0)
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
+#else /* !__RAW_SPIN_LOCK_UNLOCKED */
+#define spin_lock_hw(x) _spin_lock(x)
+#define spin_unlock_hw(x) _spin_unlock(x)
+#define spin_trylock_hw(x) _spin_trylock(x)
+#define write_lock_hw(x) _write_lock(x)
+#define write_unlock_hw(x) _write_unlock(x)
+#define write_trylock_hw(x) _write_trylock(x)
+#define read_lock_hw(x) _read_lock(x)
+#define read_unlock_hw(x) _read_unlock(x)
+#endif /* __RAW_SPIN_LOCK_UNLOCKED */
+
+typedef spinlock_t ipipe_spinlock_t;
+typedef rwlock_t ipipe_rwlock_t;
+#define IPIPE_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+#define IPIPE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
+
+#define spin_lock_irqsave_hw(x,flags) \
+do { \
+ local_irq_save_hw(flags); \
+ spin_lock_hw(x); \
+} while (0)
+
+#define spin_unlock_irqrestore_hw(x,flags) \
+do { \
+ spin_unlock_hw(x); \
+ local_irq_restore_hw(flags); \
+} while (0)
+
+#define spin_lock_irq_hw(x) \
+do { \
+ local_irq_disable_hw(); \
+ spin_lock_hw(x); \
+} while (0)
+
+#define spin_unlock_irq_hw(x) \
+do { \
+ spin_unlock_hw(x); \
+ local_irq_enable_hw(); \
+} while (0)
+
+#define read_lock_irqsave_hw(lock, flags) \
+do { \
+ local_irq_save_hw(flags); \
+ read_lock_hw(lock); \
+} while (0)
+
+#define read_unlock_irqrestore_hw(lock, flags) \
+do { \
+ read_unlock_hw(lock); \
+ local_irq_restore_hw(flags); \
+} while (0)
+
+#define write_lock_irqsave_hw(lock, flags) \
+do { \
+ local_irq_save_hw(flags); \
+ write_lock_hw(lock); \
+} while (0)
+
+#define write_unlock_irqrestore_hw(lock, flags) \
+do { \
+ write_unlock_hw(lock); \
+ local_irq_restore_hw(flags); \
+} while (0)
+
+extern struct ipipe_domain *ipipe_percpu_domain[], *ipipe_root_domain;
+
+extern unsigned __ipipe_printk_virq;
+
+extern unsigned long __ipipe_virtual_irq_map;
+
+extern struct list_head __ipipe_pipeline;
+
+extern ipipe_spinlock_t __ipipe_pipelock;
+
+extern int __ipipe_event_monitors[];
+
+/* Private interface */
+
+void ipipe_init(void);
+
+#ifdef CONFIG_PROC_FS
+void ipipe_init_proc(void);
+#else /* !CONFIG_PROC_FS */
+#define ipipe_init_proc() do { } while(0)
+#endif /* CONFIG_PROC_FS */
+
+void __ipipe_init_stage(struct ipipe_domain *ipd);
+
+void __ipipe_cleanup_domain(struct ipipe_domain *ipd);
+
+void __ipipe_add_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_flush_printk(unsigned irq);
+
+void __ipipe_stall_root(void);
+
+void __ipipe_unstall_root(void);
+
+unsigned long __ipipe_test_root(void);
+
+unsigned long __ipipe_test_and_stall_root(void);
+
+void fastcall __ipipe_restore_root(unsigned long flags);
+
+int fastcall __ipipe_schedule_irq(unsigned irq, struct list_head *head);
+
+int fastcall __ipipe_dispatch_event(unsigned event, void *data);
+
+#define __ipipe_pipeline_head_p(ipd) (&(ipd)->p_link == __ipipe_pipeline.next)
+
+#ifdef CONFIG_SMP
+
+cpumask_t __ipipe_set_irq_affinity(unsigned irq,
+ cpumask_t cpumask);
+
+int fastcall __ipipe_send_ipi(unsigned ipi,
+ cpumask_t cpumask);
+
+#endif /* CONFIG_SMP */
+
+/* Called with hw interrupts off. */
+static inline void __ipipe_switch_to(struct ipipe_domain *out,
+ struct ipipe_domain *in, int cpuid)
+{
+ void ipipe_suspend_domain(void);
+
+ /*
+ * "in" is guaranteed to be closer than "out" from the head of the
+ * pipeline (and obviously different).
+ */
+
+ ipipe_percpu_domain[cpuid] = in;
+
+ ipipe_suspend_domain(); /* Sync stage and propagate interrupts. */
+ ipipe_load_cpuid(); /* Processor might have changed. */
+
+ if (ipipe_percpu_domain[cpuid] == in)
+ /*
+ * Otherwise, something has changed the current domain under
+ * our feet recycling the register set; do not override.
+ */
+ ipipe_percpu_domain[cpuid] = out;
+}
+
+static inline void ipipe_sigwake_notify(struct task_struct *p)
+{
+ if (__ipipe_event_monitors[IPIPE_EVENT_SIGWAKE] > 0)
+ __ipipe_dispatch_event(IPIPE_EVENT_SIGWAKE,p);
+}
+
+static inline void ipipe_setsched_notify(struct task_struct *p)
+{
+ if (__ipipe_event_monitors[IPIPE_EVENT_SETSCHED] > 0)
+ __ipipe_dispatch_event(IPIPE_EVENT_SETSCHED,p);
+}
+
+static inline void ipipe_exit_notify(struct task_struct *p)
+{
+ if (__ipipe_event_monitors[IPIPE_EVENT_EXIT] > 0)
+ __ipipe_dispatch_event(IPIPE_EVENT_EXIT,p);
+}
+
+static inline int ipipe_trap_notify(int ex, struct pt_regs *regs)
+{
+ return __ipipe_event_monitors[ex] ? __ipipe_dispatch_event(ex,regs) : 0;
+}
+
+#ifdef CONFIG_IPIPE_STATS
+
+#define ipipe_mark_domain_stall(ipd, cpuid) \
+do { \
+ __label__ here; \
+ struct ipipe_stats *ips; \
+here: \
+ ips = (ipd)->stats + cpuid; \
+ if (ips->last_stall_date == 0) { \
+ ipipe_read_tsc(ips->last_stall_date); \
+ ips->last_stall_eip = (unsigned long)&&here; \
+ } \
+} while(0)
+
+static inline void ipipe_mark_domain_unstall(struct ipipe_domain *ipd, int cpuid)
+{ /* Called w/ hw interrupts off. */
+ struct ipipe_stats *ips = ipd->stats + cpuid;
+ unsigned long long t, d;
+
+ if (ips->last_stall_date != 0) {
+ ipipe_read_tsc(t);
+ d = t - ips->last_stall_date;
+ if (d > ips->max_stall_time) {
+ ips->max_stall_time = d;
+ ips->max_stall_eip = ips->last_stall_eip;
+ }
+ ips->last_stall_date = 0;
+ }
+}
+
+static inline void ipipe_mark_irq_receipt(struct ipipe_domain *ipd, unsigned irq, int cpuid)
+{
+ struct ipipe_stats *ips = ipd->stats + cpuid;
+
+ if (ips->irq_stats[irq].last_receipt_date == 0) {
+ ipipe_read_tsc(ips->irq_stats[irq].last_receipt_date);
+ }
+}
+
+static inline void ipipe_mark_irq_delivery(struct ipipe_domain *ipd, unsigned irq, int cpuid)
+{ /* Called w/ hw interrupts off. */
+ struct ipipe_stats *ips = ipd->stats + cpuid;
+ unsigned long long t, d;
+
+ if (ips->irq_stats[irq].last_receipt_date != 0) {
+ ipipe_read_tsc(t);
+ d = t - ips->irq_stats[irq].last_receipt_date;
+ ips->irq_stats[irq].last_receipt_date = 0;
+ if (d > ips->irq_stats[irq].max_delivery_time)
+ ips->irq_stats[irq].max_delivery_time = d;
+ }
+}
+
+static inline void ipipe_reset_stats (void)
+{
+ int cpu, irq;
+ for_each_online_cpu(cpu) {
+ ipipe_root_domain->stats[cpu].last_stall_date = 0LL;
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ ipipe_root_domain->stats[cpu].irq_stats[irq].last_receipt_date = 0LL;
+ }
+}
+
+#else /* !CONFIG_IPIPE_STATS */
+
+#define ipipe_mark_domain_stall(ipd,cpuid) do { } while(0)
+#define ipipe_mark_domain_unstall(ipd,cpuid) do { } while(0)
+#define ipipe_mark_irq_receipt(ipd,irq,cpuid) do { } while(0)
+#define ipipe_mark_irq_delivery(ipd,irq,cpuid) do { } while(0)
+#define ipipe_reset_stats() do { } while(0)
+
+#endif /* CONFIG_IPIPE_STATS */
+
+/* Public interface */
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+ struct ipipe_domain_attr *attr);
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd);
+
+void ipipe_suspend_domain(void);
+
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+ unsigned irq,
+ void (*handler) (unsigned irq),
+ int (*acknowledge) (unsigned irq),
+ unsigned modemask);
+
+static inline int ipipe_share_irq(unsigned irq,
+ int (*acknowledge) (unsigned irq))
+{
+ return ipipe_virtualize_irq(ipipe_current_domain,
+ irq,
+ IPIPE_SAME_HANDLER,
+ acknowledge,
+ IPIPE_SHARED_MASK | IPIPE_HANDLE_MASK |
+ IPIPE_PASS_MASK);
+}
+
+int ipipe_control_irq(unsigned irq,
+ unsigned clrmask,
+ unsigned setmask);
+
+unsigned ipipe_alloc_virq(void);
+
+int ipipe_free_virq(unsigned virq);
+
+int fastcall ipipe_trigger_irq(unsigned irq);
+
+static inline int ipipe_propagate_irq(unsigned irq)
+{
+
+ return __ipipe_schedule_irq(irq, ipipe_current_domain->p_link.next);
+}
+
+static inline int ipipe_schedule_irq(unsigned irq)
+{
+
+ return __ipipe_schedule_irq(irq, &ipipe_current_domain->p_link);
+}
+
+static inline void ipipe_stall_pipeline_from(struct ipipe_domain *ipd)
+{
+ ipipe_declare_cpuid;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+
+ ipipe_lock_cpu(flags); /* Care for migration. */
+
+ __set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipd, cpuid);
+
+ if (!__ipipe_pipeline_head_p(ipd))
+ ipipe_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+ set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipd, cpuid);
+
+ if (__ipipe_pipeline_head_p(ipd))
+ local_irq_disable_hw();
+#endif /* CONFIG_SMP */
+}
+
+static inline unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd)
+{
+ unsigned long flags, s;
+ ipipe_declare_cpuid;
+
+ ipipe_get_cpu(flags);
+ s = test_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_put_cpu(flags);
+
+ return s;
+}
+
+static inline unsigned long ipipe_test_and_stall_pipeline_from(struct
+ ipipe_domain
+ *ipd)
+{
+ ipipe_declare_cpuid;
+ unsigned long s;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+
+ ipipe_lock_cpu(flags); /* Care for migration. */
+
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipd, cpuid);
+
+ if (!__ipipe_pipeline_head_p(ipd))
+ ipipe_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+ s = test_and_set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipd, cpuid);
+
+ if (__ipipe_pipeline_head_p(ipd))
+ local_irq_disable_hw();
+#endif /* CONFIG_SMP */
+
+ return s;
+}
+
+void fastcall ipipe_unstall_pipeline_from(struct ipipe_domain *ipd);
+
+static inline unsigned long ipipe_test_and_unstall_pipeline_from(struct
+ ipipe_domain
+ *ipd)
+{
+ unsigned long flags, s;
+ ipipe_declare_cpuid;
+
+ ipipe_get_cpu(flags);
+ s = test_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_unstall_pipeline_from(ipd);
+ ipipe_put_cpu(flags);
+
+ return s;
+}
+
+static inline void ipipe_unstall_pipeline(void)
+{
+ ipipe_unstall_pipeline_from(ipipe_current_domain);
+}
+
+static inline unsigned long ipipe_test_and_unstall_pipeline(void)
+{
+ return ipipe_test_and_unstall_pipeline_from(ipipe_current_domain);
+}
+
+static inline unsigned long ipipe_test_pipeline(void)
+{
+ return ipipe_test_pipeline_from(ipipe_current_domain);
+}
+
+static inline unsigned long ipipe_test_and_stall_pipeline(void)
+{
+ return ipipe_test_and_stall_pipeline_from(ipipe_current_domain);
+}
+
+static inline void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
+ unsigned long flags)
+{
+ if (flags)
+ ipipe_stall_pipeline_from(ipd);
+ else
+ ipipe_unstall_pipeline_from(ipd);
+}
+
+static inline void ipipe_stall_pipeline(void)
+{
+ ipipe_stall_pipeline_from(ipipe_current_domain);
+}
+
+static inline void ipipe_restore_pipeline(unsigned long flags)
+{
+ ipipe_restore_pipeline_from(ipipe_current_domain, flags);
+}
+
+static inline void ipipe_restore_pipeline_nosync(struct ipipe_domain *ipd,
+ unsigned long flags, int cpuid)
+{
+ /*
+ * If cpuid is current, then it must be held on entry
+ * (ipipe_get_cpu/local_irq_save_hw/local_irq_disable_hw).
+ */
+
+ if (flags) {
+ __set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipd,cpuid);
+ }
+ else {
+ __clear_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+ ipipe_mark_domain_unstall(ipd,cpuid);
+ }
+}
+
+void ipipe_init_attr(struct ipipe_domain_attr *attr);
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
+
+int ipipe_tune_timer(unsigned long ns,
+ int flags);
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void));
+
+void ipipe_critical_exit(unsigned long flags);
+
+static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd)
+{
+ set_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
+}
+
+static inline void ipipe_set_printk_async(struct ipipe_domain *ipd)
+{
+ clear_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
+}
+
+int ipipe_catch_event(struct ipipe_domain *ipd,
+ unsigned event,
+ int (*handler)(unsigned event,
+ struct ipipe_domain *ipd,
+ void *data));
+
+cpumask_t ipipe_set_irq_affinity(unsigned irq,
+ cpumask_t cpumask);
+
+int fastcall ipipe_send_ipi(unsigned ipi,
+ cpumask_t cpumask);
+
+int ipipe_setscheduler_root(struct task_struct *p,
+ int policy,
+ int prio);
+
+int ipipe_reenter_root(struct task_struct *prev,
+ int policy,
+ int prio);
+
+int ipipe_alloc_ptdkey(void);
+
+int ipipe_free_ptdkey(int key);
+
+int fastcall ipipe_set_ptd(int key,
+ void *value);
+
+void fastcall *ipipe_get_ptd(int key);
+
+#define local_irq_enable_hw_cond() local_irq_enable_hw()
+#define local_irq_disable_hw_cond() local_irq_disable_hw()
+#define local_irq_save_hw_cond(flags) local_irq_save_hw(flags)
+#define local_irq_restore_hw_cond(flags) local_irq_restore_hw(flags)
+#define spin_lock_irqsave_hw_cond(lock,flags) spin_lock_irqsave_hw(lock,flags)
+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock_irqrestore_hw(lock,flags)
+
+#define ipipe_irq_lock(irq) \
+ do { \
+ ipipe_declare_cpuid; \
+ ipipe_load_cpuid(); \
+ __ipipe_lock_irq(ipipe_percpu_domain[cpuid], cpuid, irq);\
+ } while(0)
+
+#define ipipe_irq_unlock(irq) \
+ do { \
+ ipipe_declare_cpuid; \
+ ipipe_load_cpuid(); \
+ __ipipe_unlock_irq(ipipe_percpu_domain[cpuid], irq); \
+ } while(0)
+
+#else /* !CONFIG_IPIPE */
+
+#define ipipe_init() do { } while(0)
+#define ipipe_suspend_domain() do { } while(0)
+#define ipipe_sigwake_notify(p) do { } while(0)
+#define ipipe_setsched_notify(p) do { } while(0)
+#define ipipe_exit_notify(p) do { } while(0)
+#define ipipe_init_proc() do { } while(0)
+#define ipipe_reset_stats() do { } while(0)
+#define ipipe_trap_notify(t,r) 0
+
+#define spin_lock_hw(lock) spin_lock(lock)
+#define spin_unlock_hw(lock) spin_unlock(lock)
+#define spin_lock_irq_hw(lock) spin_lock_irq(lock)
+#define spin_unlock_irq_hw(lock) spin_unlock_irq(lock)
+#define spin_lock_irqsave_hw(lock,flags) spin_lock_irqsave(lock, flags)
+#define spin_unlock_irqrestore_hw(lock,flags) spin_unlock_irqrestore(lock, flags)
+
+#define local_irq_enable_hw_cond() do { } while(0)
+#define local_irq_disable_hw_cond() do { } while(0)
+#define local_irq_save_hw_cond(flags) do { flags = 0; /* Optimized out */ } while(0)
+#define local_irq_restore_hw_cond(flags) do { } while(0)
+#define spin_lock_irqsave_hw_cond(lock,flags) do { flags = 0; spin_lock(lock); } while(0)
+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock(lock)
+
+#define ipipe_irq_lock(irq) do { } while(0)
+#define ipipe_irq_unlock(irq) do { } while(0)
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__LINUX_IPIPE_H */
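
To make the public interface above concrete, here is a minimal sketch of
how a client (e.g. a real-time nucleus module) would register itself over
the root domain; all names and the domid value are illustrative, not part
of the patch:

	static struct ipipe_domain rt_domain;

	static void rt_domain_entry(void)
	{
		/* Runs once over the new domain; hook events and IRQs
		 * here, e.g. with ipipe_catch_event() or
		 * ipipe_virtualize_irq(). */
	}

	static int __init rt_init(void)
	{
		struct ipipe_domain_attr attr;

		ipipe_init_attr(&attr);		/* sane defaults */
		attr.name = "RTnucleus";
		attr.domid = 0x52544e55;	/* arbitrary magic */
		attr.priority = IPIPE_ROOT_PRIO + 100;	/* ahead of Linux */
		attr.entry = &rt_domain_entry;

		return ipipe_register_domain(&rt_domain, &attr);
	}

A higher priority inserts the domain closer to the head of the pipeline,
so it sees incoming interrupts before Linux does.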
diff -Nru linux-2.6.14/include/linux/preempt.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/preempt.h
--- linux-2.6.14/include/linux/preempt.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/preempt.h 2005-11-04 12:12:57.000000000 +0200
@@ -26,28 +26,45 @@
asmlinkage void preempt_schedule(void);
-#define preempt_disable() \
-do { \
- inc_preempt_count(); \
- barrier(); \
+#ifdef CONFIG_IPIPE
+
+#include <asm/ipipe.h>
+
+extern struct ipipe_domain *ipipe_percpu_domain[], *ipipe_root_domain;
+
+#define ipipe_preempt_guard() (ipipe_percpu_domain[ipipe_processor_id()] == ipipe_root_domain)
+#else /* !CONFIG_IPIPE */
+#define ipipe_preempt_guard() 1
+#endif /* CONFIG_IPIPE */
+
+#define preempt_disable() \
+do { \
+ if (ipipe_preempt_guard()) { \
+ inc_preempt_count(); \
+ barrier(); \
+ } \
} while (0)
-#define preempt_enable_no_resched() \
-do { \
- barrier(); \
- dec_preempt_count(); \
+#define preempt_enable_no_resched() \
+do { \
+ if (ipipe_preempt_guard()) { \
+ barrier(); \
+ dec_preempt_count(); \
+ } \
} while (0)
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
+#define preempt_check_resched() \
+do { \
+ if (ipipe_preempt_guard()) { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+ } \
} while (0)
-#define preempt_enable() \
-do { \
- preempt_enable_no_resched(); \
- preempt_check_resched(); \
+#define preempt_enable() \
+do { \
+ preempt_enable_no_resched(); \
+ preempt_check_resched(); \
} while (0)
#else
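
The guard above means the preemption count is only touched while the root
domain is current. A minimal sketch of the intended effect, assuming a
handler running over a higher-priority domain (the handler name is made
up):

	static void rt_irq_handler(unsigned irq)
	{
		preempt_disable();	/* no-op here: current domain != root */
		/* ... work that must not disturb the root
		 * domain's preemption state ... */
		preempt_enable();	/* no-op as well; no resched from here */
	}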
diff -Nru linux-2.6.14/include/linux/sched.h linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/sched.h
--- linux-2.6.14/include/linux/sched.h 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/include/linux/sched.h 2005-11-04 12:13:27.000000000 +0200
@@ -4,6 +4,7 @@
#include <asm/param.h> /* for HZ */
#include <linux/config.h>
+#include <linux/ipipe.h>
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
@@ -813,6 +814,9 @@
int cpuset_mems_generation;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
+#ifdef CONFIG_IPIPE
+ void *ptd[IPIPE_ROOT_NPTDKEYS];
+#endif
};
static inline pid_t process_group(struct task_struct *tsk)
diff -Nru linux-2.6.14/init/Kconfig linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/init/Kconfig
--- linux-2.6.14/init/Kconfig 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/init/Kconfig 2005-11-04 12:13:48.000000000 +0200
@@ -69,6 +69,7 @@
config LOCALVERSION
string "Local version - append to kernel release"
+ default "-ipipe"
help
Append an extra string to the end of your kernel version.
This will show up when you type uname, for example.
diff -Nru linux-2.6.14/init/main.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/init/main.c
--- linux-2.6.14/init/main.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/init/main.c 2005-11-04 12:14:05.000000000 +0200
@@ -402,8 +402,9 @@
*/
schedule();
+ ipipe_reset_stats();
cpu_idle();
-}
+}
/* Check for early params. */
static int __init do_early_param(char *param, char *val)
@@ -487,6 +488,11 @@
init_timers();
softirq_init();
time_init();
+ /*
+ * We need to wait for the interrupt and time subsystems to be
+ * initialized before enabling the pipeline.
+ */
+ ipipe_init();
/*
* HACK ALERT! This is early. We're enabling the console before
@@ -611,6 +617,7 @@
#ifdef CONFIG_SYSCTL
sysctl_init();
#endif
+ ipipe_init_proc();
/* Networking initialization needs a process context */
sock_init();
diff -Nru linux-2.6.14/kernel/exit.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/exit.c
--- linux-2.6.14/kernel/exit.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/exit.c 2005-11-04 12:14:33.000000000 +0200
@@ -846,6 +846,7 @@
exit_itimers(tsk->signal);
acct_process(code);
}
+ ipipe_exit_notify(tsk);
exit_mm(tsk);
exit_sem(tsk);
diff -Nru linux-2.6.14/kernel/fork.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/fork.c
--- linux-2.6.14/kernel/fork.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/fork.c 2005-11-04 12:14:46.000000000 +0200
@@ -1153,6 +1153,14 @@
total_forks++;
write_unlock_irq(&tasklist_lock);
retval = 0;
+#ifdef CONFIG_IPIPE
+ {
+ int k;
+
+ for (k = 0; k < IPIPE_ROOT_NPTDKEYS; k++)
+ p->ptd[k] = NULL;
+ }
+#endif /* CONFIG_IPIPE */
fork_out:
if (retval)
diff -Nru linux-2.6.14/kernel/ipipe/core.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/core.c
--- linux-2.6.14/kernel/ipipe/core.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/core.c 2005-11-04 08:53:51.000000000 +0200
@@ -0,0 +1,678 @@
+/* -*- linux-c -*-
+ * linux/kernel/ipipe/core.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-independent I-PIPE core support.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+
+static struct ipipe_domain ipipe_root =
+ { .cpudata = {[0 ... IPIPE_NR_CPUS-1] =
+ { .status = (1<<IPIPE_STALL_FLAG) } } };
+
+struct ipipe_domain *ipipe_root_domain = &ipipe_root;
+
+struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
+ {[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };
+
+ipipe_spinlock_t __ipipe_pipelock = IPIPE_SPIN_LOCK_UNLOCKED;
+
+struct list_head __ipipe_pipeline;
+
+unsigned long __ipipe_virtual_irq_map = 0;
+
+unsigned __ipipe_printk_virq;
+
+int __ipipe_event_monitors[IPIPE_NR_EVENTS];
+
+/*
+ * ipipe_init() -- Initialization routine of the IPIPE layer. Called
+ * by the host kernel early during the boot procedure.
+ */
+void ipipe_init(void)
+{
+ struct ipipe_domain *ipd = &ipipe_root;
+
+ __ipipe_check_platform(); /* Do platform dependent checks first. */
+
+ /*
+ * A lightweight registration code for the root domain. We are
+ * running on the boot CPU, hw interrupts are off, and
+ * secondary CPUs are still lost in space.
+ */
+
+ INIT_LIST_HEAD(&__ipipe_pipeline);
+
+ ipd->name = "Linux";
+ ipd->domid = IPIPE_ROOT_ID;
+ ipd->priority = IPIPE_ROOT_PRIO;
+
+ __ipipe_init_stage(ipd);
+
+ INIT_LIST_HEAD(&ipd->p_link);
+ list_add_tail(&ipd->p_link, &__ipipe_pipeline);
+
+ __ipipe_init_platform();
+
+ __ipipe_printk_virq = ipipe_alloc_virq(); /* Cannot fail here. */
+ ipd->irqs[__ipipe_printk_virq].handler = &__ipipe_flush_printk;
+ ipd->irqs[__ipipe_printk_virq].acknowledge = NULL;
+ ipd->irqs[__ipipe_printk_virq].control = IPIPE_HANDLE_MASK;
+
+ __ipipe_enable_pipeline();
+
+ printk(KERN_INFO "I-pipe %s: pipeline enabled.\n",
+ IPIPE_VERSION_STRING);
+}
+
+void __ipipe_init_stage(struct ipipe_domain *ipd)
+{
+ int cpuid, n;
+
+ for (cpuid = 0; cpuid < IPIPE_NR_CPUS; cpuid++) {
+ ipd->cpudata[cpuid].irq_pending_hi = 0;
+
+ for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
+ ipd->cpudata[cpuid].irq_pending_lo[n] = 0;
+
+ for (n = 0; n < IPIPE_NR_IRQS; n++)
+ ipd->cpudata[cpuid].irq_hits[n] = 0;
+ }
+
+ for (n = 0; n < IPIPE_NR_IRQS; n++) {
+ ipd->irqs[n].acknowledge = NULL;
+ ipd->irqs[n].handler = NULL;
+ ipd->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
+ }
+
+ for (n = 0; n < IPIPE_NR_EVENTS; n++)
+ ipd->evhand[n] = NULL;
+
+ ipd->evexcl = 0;
+
+#ifdef CONFIG_SMP
+ ipd->irqs[IPIPE_CRITICAL_IPI].acknowledge = &__ipipe_ack_system_irq;
+ ipd->irqs[IPIPE_CRITICAL_IPI].handler = &__ipipe_do_critical_sync;
+ /* Immediately handle in the current domain but *never* pass */
+ ipd->irqs[IPIPE_CRITICAL_IPI].control =
+ IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
+#endif /* CONFIG_SMP */
+}
+
+void __ipipe_stall_root(void)
+{
+ ipipe_declare_cpuid;
+ unsigned long flags;
+
+ ipipe_get_cpu(flags); /* Care for migration. */
+
+ set_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
+
+#ifdef CONFIG_SMP
+ if (!__ipipe_pipeline_head_p(ipipe_root_domain))
+ ipipe_put_cpu(flags);
+#else /* CONFIG_SMP */
+ if (__ipipe_pipeline_head_p(ipipe_root_domain))
+ local_irq_disable_hw();
+#endif /* CONFIG_SMP */
+ ipipe_mark_domain_stall(ipipe_root_domain,cpuid);
+}
+
+void __ipipe_cleanup_domain(struct ipipe_domain *ipd)
+{
+ ipipe_unstall_pipeline_from(ipd);
+
+#ifdef CONFIG_SMP
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ while (ipd->cpudata[cpu].irq_pending_hi != 0)
+ cpu_relax();
+ }
+ }
+#endif /* CONFIG_SMP */
+}
+
+void __ipipe_unstall_root(void)
+{
+ ipipe_declare_cpuid;
+
+ local_irq_disable_hw();
+
+ ipipe_load_cpuid();
+
+ __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
+
+ ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
+
+ if (ipipe_root_domain->cpudata[cpuid].irq_pending_hi != 0)
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+
+ local_irq_enable_hw();
+}
+
+unsigned long __ipipe_test_root(void)
+{
+ unsigned long flags, s;
+ ipipe_declare_cpuid;
+
+ ipipe_get_cpu(flags); /* Care for migration. */
+ s = test_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
+ ipipe_put_cpu(flags);
+
+ return s;
+}
+
+unsigned long __ipipe_test_and_stall_root(void)
+{
+ unsigned long flags, s;
+ ipipe_declare_cpuid;
+
+ ipipe_get_cpu(flags); /* Care for migration. */
+ s = test_and_set_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status);
+ ipipe_mark_domain_stall(ipipe_root_domain,cpuid);
+ ipipe_put_cpu(flags);
+
+ return s;
+}
+
+void fastcall __ipipe_restore_root(unsigned long flags)
+{
+ if (flags)
+ __ipipe_stall_root();
+ else
+ __ipipe_unstall_root();
+}
+
+/*
+ * ipipe_unstall_pipeline_from() -- Unstall the pipeline and
+ * synchronize pending interrupts for a given domain. See
+ * __ipipe_walk_pipeline() for more information.
+ */
+void fastcall ipipe_unstall_pipeline_from(struct ipipe_domain *ipd)
+{
+ struct ipipe_domain *this_domain;
+ struct list_head *pos;
+ unsigned long flags;
+ ipipe_declare_cpuid;
+
+ ipipe_lock_cpu(flags);
+
+ __clear_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
+
+ ipipe_mark_domain_unstall(ipd, cpuid);
+
+ this_domain = ipipe_percpu_domain[cpuid];
+
+ if (ipd == this_domain) {
+ if (ipd->cpudata[cpuid].irq_pending_hi != 0)
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+
+ goto release_cpu_and_exit;
+ }
+
+ list_for_each(pos, &__ipipe_pipeline) {
+
+ struct ipipe_domain *next_domain =
+ list_entry(pos, struct ipipe_domain, p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,
+ &next_domain->cpudata[cpuid].status))
+ break; /* Stalled stage -- do not go further. */
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
+
+ if (next_domain == this_domain)
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+ else {
+ __ipipe_switch_to(this_domain, next_domain,
+ cpuid);
+
+ ipipe_load_cpuid(); /* Processor might have changed. */
+
+ if (this_domain->cpudata[cpuid].
+ irq_pending_hi != 0
+ && !test_bit(IPIPE_STALL_FLAG,
+ &this_domain->cpudata[cpuid].
+ status))
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ break;
+ } else if (next_domain == this_domain)
+ break;
+ }
+
+release_cpu_and_exit:
+
+ if (__ipipe_pipeline_head_p(ipd))
+ local_irq_enable_hw();
+ else
+ ipipe_unlock_cpu(flags);
+}
+
+/*
+ * ipipe_suspend_domain() -- Suspend the current domain, switching to
+ * the next one which has pending work down the pipeline.
+ */
+void ipipe_suspend_domain(void)
+{
+ struct ipipe_domain *this_domain, *next_domain;
+ struct list_head *ln;
+ unsigned long flags;
+ ipipe_declare_cpuid;
+
+ ipipe_lock_cpu(flags);
+
+ this_domain = next_domain = ipipe_percpu_domain[cpuid];
+
+ __clear_bit(IPIPE_STALL_FLAG, &this_domain->cpudata[cpuid].status);
+
+ ipipe_mark_domain_unstall(this_domain, cpuid);
+
+ if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
+ goto sync_stage;
+
+ for (;;) {
+ ln = next_domain->p_link.next;
+
+ if (ln == &__ipipe_pipeline)
+ break;
+
+ next_domain = list_entry(ln, struct ipipe_domain, p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,
+ &next_domain->cpudata[cpuid].status))
+ break;
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
+ continue;
+
+ ipipe_percpu_domain[cpuid] = next_domain;
+
+sync_stage:
+
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+
+ ipipe_load_cpuid(); /* Processor might have changed. */
+
+ if (ipipe_percpu_domain[cpuid] != next_domain)
+ /*
+ * Something has changed the current domain under our
+ * feet, recycling the register set; take note.
+ */
+ this_domain = ipipe_percpu_domain[cpuid];
+ }
+
+ ipipe_percpu_domain[cpuid] = this_domain;
+
+ ipipe_unlock_cpu(flags);
+}
+
+/* ipipe_alloc_virq() -- Allocate a pipelined virtual/soft interrupt.
+ * Virtual interrupts are handled in exactly the same way as their
+ * hw-generated counterparts wrt pipelining.
+ */
+unsigned ipipe_alloc_virq(void)
+{
+ unsigned long flags, irq = 0;
+ int ipos;
+
+ spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
+
+ if (__ipipe_virtual_irq_map != ~0) {
+ ipos = ffz(__ipipe_virtual_irq_map);
+ set_bit(ipos, &__ipipe_virtual_irq_map);
+ irq = ipos + IPIPE_VIRQ_BASE;
+ }
+
+ spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
+
+ return irq;
+}
+
+/* __ipipe_dispatch_event() -- Low-level event dispatcher. */
+
+int fastcall __ipipe_dispatch_event (unsigned event, void *data)
+{
+ struct ipipe_domain *start_domain, *this_domain, *next_domain;
+ struct list_head *pos, *npos;
+ unsigned long flags;
+ ipipe_declare_cpuid;
+ int propagate = 1;
+
+ ipipe_lock_cpu(flags);
+
+ start_domain = this_domain = ipipe_percpu_domain[cpuid];
+
+ list_for_each_safe(pos,npos,&__ipipe_pipeline) {
+
+ next_domain = list_entry(pos,struct ipipe_domain,p_link);
+
+ /*
+ * Note: Domain migration may occur while running
+ * event or interrupt handlers, in which case the
+ * current register set is going to be recycled for a
+ * different domain than the initiating one. We do
+ * care for that, always tracking the current domain
+ * descriptor upon return from those handlers.
+ */
+ if (next_domain->evhand[event] != NULL) {
+ ipipe_percpu_domain[cpuid] = next_domain;
+ ipipe_unlock_cpu(flags);
+ propagate = !next_domain->evhand[event](event,start_domain,data);
+ ipipe_lock_cpu(flags);
+ if (ipipe_percpu_domain[cpuid] != next_domain)
+ this_domain = ipipe_percpu_domain[cpuid];
+ }
+
+ if (next_domain != ipipe_root_domain && /* NEVER sync the root stage here. */
+ next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+ !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status)) {
+ ipipe_percpu_domain[cpuid] = next_domain;
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+ ipipe_load_cpuid();
+ if (ipipe_percpu_domain[cpuid] != next_domain)
+ this_domain = ipipe_percpu_domain[cpuid];
+ }
+
+ ipipe_percpu_domain[cpuid] = this_domain;
+
+ if (next_domain == this_domain || !propagate)
+ break;
+ }
+
+ ipipe_unlock_cpu(flags);
+
+ return !propagate;
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *ipipe_proc_root;
+
+static int __ipipe_version_info_proc(char *page,
+ char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ int len = sprintf(page, "%s\n", IPIPE_VERSION_STRING);
+
+ len -= off;
+
+ if (len <= off + count)
+ *eof = 1;
+
+ *start = page + off;
+
+ if(len > count)
+ len = count;
+
+ if(len < 0)
+ len = 0;
+
+ return len;
+}
+
+static int __ipipe_common_info_proc(char *page,
+ char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ struct ipipe_domain *ipd = (struct ipipe_domain *)data;
+ unsigned long ctlbits;
+ unsigned irq, _irq;
+ char *p = page;
+ int len;
+
+ spin_lock(&__ipipe_pipelock);
+
+ p += sprintf(p, "Priority=%d, Id=0x%.8x\n",
+ ipd->priority, ipd->domid);
+ irq = 0;
+
+ while (irq < IPIPE_NR_IRQS) {
+ ctlbits =
+ (ipd->irqs[irq].
+ control & (IPIPE_HANDLE_MASK | IPIPE_PASS_MASK |
+ IPIPE_STICKY_MASK));
+ if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)) {
+ /*
+ * There might be a hole between the last external
+ * IRQ and the first virtual one; skip it.
+ */
+ irq++;
+ continue;
+ }
+
+ if (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE,
+ &__ipipe_virtual_irq_map)) {
+ /* Non-allocated virtual IRQ; skip it. */
+ irq++;
+ continue;
+ }
+
+ /*
+ * Attempt to group consecutive IRQ numbers having the
+ * same virtualization settings in a single line.
+ */
+
+ _irq = irq;
+
+ while (++_irq < IPIPE_NR_IRQS) {
+ if (ipipe_virtual_irq_p(_irq) !=
+ ipipe_virtual_irq_p(irq)
+ || (ipipe_virtual_irq_p(_irq)
+ && !test_bit(_irq - IPIPE_VIRQ_BASE,
+ &__ipipe_virtual_irq_map))
+ || ctlbits != (ipd->irqs[_irq].
+ control & (IPIPE_HANDLE_MASK |
+ IPIPE_PASS_MASK |
+ IPIPE_STICKY_MASK)))
+ break;
+ }
+
+ if (_irq == irq + 1)
+ p += sprintf(p, "irq%u: ", irq);
+ else
+ p += sprintf(p, "irq%u-%u: ", irq, _irq - 1);
+
+ /*
+ * Statuses are as follows:
+ * o "accepted" means handled _and_ passed down the pipeline.
+ * o "grabbed" means handled, but the interrupt might be
+ * terminated _or_ passed down the pipeline depending on
+ * what the domain handler asks of the I-pipe.
+ * o "passed" means unhandled by the domain but passed
+ * down the pipeline.
+ * o "discarded" means unhandled and _not_ passed down the
+ * pipeline. The interrupt merely disappears from the
+ * current domain down to the end of the pipeline.
+ */
+ if (ctlbits & IPIPE_HANDLE_MASK) {
+ if (ctlbits & IPIPE_PASS_MASK)
+ p += sprintf(p, "accepted");
+ else
+ p += sprintf(p, "grabbed");
+ } else if (ctlbits & IPIPE_PASS_MASK)
+ p += sprintf(p, "passed");
+ else
+ p += sprintf(p, "discarded");
+
+ if (ctlbits & IPIPE_STICKY_MASK)
+ p += sprintf(p, ", sticky");
+
+ if (ipipe_virtual_irq_p(irq))
+ p += sprintf(p, ", virtual");
+
+ p += sprintf(p, "\n");
+
+ irq = _irq;
+ }
+
+ spin_unlock(&__ipipe_pipelock);
+
+ len = p - page;
+
+ if (len <= off + count)
+ *eof = 1;
+
+ *start = page + off;
+
+ len -= off;
+
+ if (len > count)
+ len = count;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
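+/*
+ * Illustration only -- the exact figures depend on the platform and
+ * on which domains are registered; reading /proc/ipipe/Linux might
+ * show something like:
+ *
+ *   Priority=100, Id=0x00000000
+ *   irq0-511: accepted
+ *   irq512: grabbed, virtual
+ */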
+
+#ifdef CONFIG_IPIPE_STATS
+
+static int __ipipe_stat_info_proc(char *page,
+ char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ struct ipipe_domain *ipd = (struct ipipe_domain *)data;
+ int len = 0, cpu, irq;
+ char *p = page;
+
+ p += sprintf(p,"> STALL TIME:\n");
+
+ for_each_online_cpu(cpu) {
+ unsigned long eip = ipd->stats[cpu].max_stall_eip;
+ char namebuf[KSYM_NAME_LEN+1];
+ unsigned long offset, size, t;
+ const char *name;
+ char *modname;
+
+ name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf);
+ t = ipipe_tsc2ns(ipd->stats[cpu].max_stall_time);
+
+ if (name) {
+ if (modname)
+ p += sprintf(p,"CPU%d %12lu (%s+%#lx [%s])\n",
+ cpu,t,name,offset,modname);
+ else
+ p += sprintf(p,"CPU%d %12lu (%s+%#lx)\n",
+ cpu,t,name,offset);
+ }
+ else
+ p += sprintf(p,"CPU%d %12lu (%lx)\n",
+ cpu,t,eip);
+ }
+
+ p += sprintf(p,"> PROPAGATION TIME:\nIRQ");
+
+ for_each_online_cpu(cpu) {
+ p += sprintf(p," CPU%d",cpu);
+ }
+
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
+
+ unsigned long long t = 0;
+
+ for_each_online_cpu(cpu) {
+ t += ipd->stats[cpu].irq_stats[irq].max_delivery_time;
+ }
+
+ if (!t)
+ continue;
+
+ p += sprintf(p,"\n%3d:",irq);
+
+ for_each_online_cpu(cpu) {
+ p += sprintf(p,"%13lu",
+
ipipe_tsc2ns(ipd->stats[cpu].irq_stats[irq].max_delivery_time));
+ }
+ }
+
+ p += sprintf(p,"\n");
+
+ len = p - page - off;
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ if (len > count) len = count;
+ if (len < 0) len = 0;
+
+ return len;
+}
+
+#endif /* CONFIG_IPIPE_STATS */
+
+void __ipipe_add_domain_proc(struct ipipe_domain *ipd)
+{
+
+ create_proc_read_entry(ipd->name,0444,ipipe_proc_root,&__ipipe_common_info_proc,ipd);
+#ifdef CONFIG_IPIPE_STATS
+ {
+ char name[64];
+ snprintf(name,sizeof(name),"%s_stats",ipd->name);
+ create_proc_read_entry(name,0444,ipipe_proc_root,&__ipipe_stat_info_proc,ipd);
+ }
+#endif /* CONFIG_IPIPE_STATS */
+}
+
+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd)
+{
+ remove_proc_entry(ipd->name,ipipe_proc_root);
+#ifdef CONFIG_IPIPE_STATS
+ {
+ char name[64];
+ snprintf(name,sizeof(name),"%s_stats",ipd->name);
+ remove_proc_entry(name,ipipe_proc_root);
+ }
+#endif /* CONFIG_IPIPE_STATS */
+}
+
+void ipipe_init_proc(void)
+{
+ ipipe_proc_root = create_proc_entry("ipipe",S_IFDIR, 0);
+ create_proc_read_entry("version",0444,ipipe_proc_root,&__ipipe_version_info_proc,NULL);
+ __ipipe_add_domain_proc(ipipe_root_domain);
+}
+
+#endif /* CONFIG_PROC_FS */
+
+EXPORT_SYMBOL(ipipe_suspend_domain);
+EXPORT_SYMBOL(ipipe_alloc_virq);
+EXPORT_SYMBOL(ipipe_unstall_pipeline_from);
+EXPORT_SYMBOL(ipipe_percpu_domain);
+EXPORT_SYMBOL(ipipe_root_domain);
+EXPORT_SYMBOL(__ipipe_unstall_root);
+EXPORT_SYMBOL(__ipipe_stall_root);
+EXPORT_SYMBOL(__ipipe_restore_root);
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+EXPORT_SYMBOL(__ipipe_test_root);
+EXPORT_SYMBOL(__ipipe_dispatch_event);
+EXPORT_SYMBOL(__ipipe_pipeline);
+EXPORT_SYMBOL(__ipipe_pipelock);
+EXPORT_SYMBOL(__ipipe_virtual_irq_map);
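
As a usage note for the services exported above, a hedged sketch of
posting a virtual IRQ; the handler name is made up, and the mode mask
shown is one plausible choice, not mandated by the patch:

	static unsigned my_virq;

	static void my_virq_handler(unsigned irq)
	{
		/* Runs when the owning domain synchronizes its stage. */
	}

	static int setup_virq(void)
	{
		my_virq = ipipe_alloc_virq();
		if (!my_virq)
			return -EBUSY;	/* virq map exhausted */

		/* Handle the virq in the current domain. */
		return ipipe_virtualize_irq(ipipe_current_domain, my_virq,
					    &my_virq_handler, NULL,
					    IPIPE_HANDLE_MASK);
	}

	/* Later, from any domain: ipipe_trigger_irq(my_virq); */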
diff -Nru linux-2.6.14/kernel/ipipe/generic.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/generic.c
--- linux-2.6.14/kernel/ipipe/generic.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/generic.c 2005-12-04 12:35:49.000000000 +0200
@@ -0,0 +1,392 @@
+/* -*- linux-c -*-
+ * linux/kernel/ipipe/generic.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-independent I-PIPE services.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+
+MODULE_DESCRIPTION("I-pipe");
+MODULE_LICENSE("GPL");
+
+static int __ipipe_ptd_key_count;
+
+static unsigned long __ipipe_ptd_key_map;
+
+/* ipipe_register_domain() -- Link a new domain to the pipeline. */
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+ struct ipipe_domain_attr *attr)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ if (ipipe_current_domain != ipipe_root_domain) {
+ printk(KERN_WARNING
+ "I-pipe: Only the root domain may register a new
domain.\n");
+ return -EPERM;
+ }
+
+ flags = ipipe_critical_enter(NULL);
+
+ list_for_each(pos, &__ipipe_pipeline) {
+ struct ipipe_domain *_ipd =
+ list_entry(pos, struct ipipe_domain, p_link);
+ if (_ipd->domid == attr->domid)
+ break;
+ }
+
+ ipipe_critical_exit(flags);
+
+ if (pos != &__ipipe_pipeline)
+ /* A domain with the given id already exists -- fail. */
+ return -EBUSY;
+
+ ipd->name = attr->name;
+ ipd->priority = attr->priority;
+ ipd->domid = attr->domid;
+ ipd->pdd = attr->pdd;
+ ipd->flags = 0;
+
+#ifdef CONFIG_IPIPE_STATS
+ {
+ int cpu, irq;
+ for_each_online_cpu(cpu) {
+ ipd->stats[cpu].last_stall_date = 0LL;
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ ipd->stats[cpu].irq_stats[irq].last_receipt_date = 0LL;
+ }
+ }
+#endif /* CONFIG_IPIPE_STATS */
+
+ __ipipe_init_stage(ipd);
+
+ INIT_LIST_HEAD(&ipd->p_link);
+
+#ifdef CONFIG_PROC_FS
+ __ipipe_add_domain_proc(ipd);
+#endif /* CONFIG_PROC_FS */
+
+ flags = ipipe_critical_enter(NULL);
+
+ list_for_each(pos, &__ipipe_pipeline) {
+ struct ipipe_domain *_ipd =
+ list_entry(pos, struct ipipe_domain, p_link);
+ if (ipd->priority > _ipd->priority)
+ break;
+ }
+
+ list_add_tail(&ipd->p_link, pos);
+
+ ipipe_critical_exit(flags);
+
+ printk(KERN_WARNING "I-pipe: Domain %s registered.\n", ipd->name);
+
+ /*
+ * Finally, allow the new domain to perform its initialization
+ * chores.
+ */
+
+ if (attr->entry != NULL) {
+ ipipe_declare_cpuid;
+
+ ipipe_lock_cpu(flags);
+
+ ipipe_percpu_domain[cpuid] = ipd;
+ attr->entry();
+ ipipe_percpu_domain[cpuid] = ipipe_root_domain;
+
+ ipipe_load_cpuid(); /* Processor might have changed. */
+
+ if (ipipe_root_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+ !test_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_domain->cpudata[cpuid].status))
+ __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
+
+ ipipe_unlock_cpu(flags);
+ }
+
+ return 0;
+}
+
+/* ipipe_unregister_domain() -- Remove a domain from the pipeline. */
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd)
+{
+ unsigned long flags;
+
+ if (ipipe_current_domain != ipipe_root_domain) {
+ printk(KERN_WARNING
+ "I-pipe: Only the root domain may unregister a
domain.\n");
+ return -EPERM;
+ }
+
+ if (ipd == ipipe_root_domain) {
+ printk(KERN_WARNING
+ "I-pipe: Cannot unregister the root domain.\n");
+ return -EPERM;
+ }
+#ifdef CONFIG_SMP
+ {
+ int nr_cpus = num_online_cpus(), _cpuid;
+ unsigned irq;
+
+ /*
+ * In the SMP case, wait for the logged events to drain on
+ * other processors before eventually removing the domain
+ * from the pipeline.
+ */
+
+ ipipe_unstall_pipeline_from(ipd);
+
+ flags = ipipe_critical_enter(NULL);
+
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
+ clear_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control);
+ clear_bit(IPIPE_STICKY_FLAG, &ipd->irqs[irq].control);
+ set_bit(IPIPE_PASS_FLAG, &ipd->irqs[irq].control);
+ }
+
+ ipipe_critical_exit(flags);
+
+ for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ while (ipd->cpudata[_cpuid].irq_hits[irq] > 0)
+ cpu_relax();
+ }
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PROC_FS
+ __ipipe_remove_domain_proc(ipd);
+#endif /* CONFIG_PROC_FS */
+
+ /*
+ * Simply remove the domain from the pipeline and we are almost done.
+ */
+
+ flags = ipipe_critical_enter(NULL);
+ list_del_init(&ipd->p_link);
+ ipipe_critical_exit(flags);
+
+ __ipipe_cleanup_domain(ipd);
+
+ printk(KERN_WARNING "I-pipe: Domain %s unregistered.\n", ipd->name);
+
+ return 0;
+}
+
+/*
+ * ipipe_propagate_irq() -- Force a given IRQ propagation on behalf of
+ * a running interrupt handler to the next domain down the pipeline.
+ * ipipe_schedule_irq() -- Does almost the same as above, but attempts
+ * to pend the interrupt for the current domain first.
+ */
+int fastcall __ipipe_schedule_irq(unsigned irq, struct list_head *head)
+{
+ struct list_head *ln;
+ unsigned long flags;
+ ipipe_declare_cpuid;
+
+ if (irq >= IPIPE_NR_IRQS ||
+ (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+ return -EINVAL;
+
+ ipipe_lock_cpu(flags);
+
+ ln = head;
+
+ while (ln != &__ipipe_pipeline) {
+ struct ipipe_domain *ipd =
+ list_entry(ln, struct ipipe_domain, p_link);
+
+ if (test_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control)) {
+ ipd->cpudata[cpuid].irq_hits[irq]++;
+ __ipipe_set_irq_bit(ipd, cpuid, irq);
+ ipipe_mark_irq_receipt(ipd, irq, cpuid);
+ ipipe_unlock_cpu(flags);
+ return 1;
+ }
+
+ ln = ipd->p_link.next;
+ }
+
+ ipipe_unlock_cpu(flags);
+
+ return 0;
+}
+
+/* ipipe_free_virq() -- Release a virtual/soft interrupt. */
+
+int ipipe_free_virq(unsigned virq)
+{
+ if (!ipipe_virtual_irq_p(virq))
+ return -EINVAL;
+
+ clear_bit(virq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map);
+
+ return 0;
+}
+
+void ipipe_init_attr(struct ipipe_domain_attr *attr)
+{
+ attr->name = "anon";
+ attr->domid = 1;
+ attr->entry = NULL;
+ attr->priority = IPIPE_ROOT_PRIO;
+ attr->pdd = NULL;
+}
+
+/*
+ * ipipe_catch_event() -- Interpose or remove an event handler for a
+ * given domain.
+ */
+int ipipe_catch_event(struct ipipe_domain *ipd,
+ unsigned event,
+ int (*handler)(unsigned event, struct ipipe_domain *ipd, void *data))
+{
+ if (event >= IPIPE_NR_EVENTS)
+ return -EINVAL;
+
+ if (!xchg(&ipd->evhand[event],handler)) {
+ if (handler)
+ __ipipe_event_monitors[event]++;
+ }
+ else if (!handler)
+ __ipipe_event_monitors[event]--;
+
+ return 0;
+}
+
+cpumask_t ipipe_set_irq_affinity (unsigned irq, cpumask_t cpumask)
+{
+#ifdef CONFIG_SMP
+ if (irq >= IPIPE_NR_XIRQS)
+ /* Allow changing affinity of external IRQs only. */
+ return CPU_MASK_NONE;
+
+ if (num_online_cpus() > 1)
+ return __ipipe_set_irq_affinity(irq,cpumask);
+#endif /* CONFIG_SMP */
+
+ return CPU_MASK_NONE;
+}
+
+int fastcall ipipe_send_ipi (unsigned ipi, cpumask_t cpumask)
+
+{
+#ifdef CONFIG_SMP
+ switch (ipi) {
+
+ case IPIPE_SERVICE_IPI0:
+ case IPIPE_SERVICE_IPI1:
+ case IPIPE_SERVICE_IPI2:
+ case IPIPE_SERVICE_IPI3:
+#ifdef IPIPE_SERVICE_IPI4
+ case IPIPE_SERVICE_IPI4:
+#endif /* IPIPE_SERVICE_IPI4 */
+ break;
+
+ default:
+
+ return -EINVAL;
+ }
+
+ return __ipipe_send_ipi(ipi,cpumask);
+#endif /* CONFIG_SMP */
+
+ return -EINVAL;
+}
+
+int ipipe_alloc_ptdkey (void)
+{
+ unsigned long flags;
+ int key = -1;
+
+ spin_lock_irqsave_hw(&__ipipe_pipelock,flags);
+
+ if (__ipipe_ptd_key_count < IPIPE_ROOT_NPTDKEYS) {
+ key = ffz(__ipipe_ptd_key_map);
+ set_bit(key,&__ipipe_ptd_key_map);
+ __ipipe_ptd_key_count++;
+ }
+
+ spin_unlock_irqrestore_hw(&__ipipe_pipelock,flags);
+
+ return key;
+}
+
+int ipipe_free_ptdkey (int key)
+{
+ unsigned long flags;
+
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return -EINVAL;
+
+ spin_lock_irqsave_hw(&__ipipe_pipelock,flags);
+
+ if (test_and_clear_bit(key,&__ipipe_ptd_key_map))
+ __ipipe_ptd_key_count--;
+
+ spin_unlock_irqrestore_hw(&__ipipe_pipelock,flags);
+
+ return 0;
+}
+
+int fastcall ipipe_set_ptd (int key, void *value)
+
+{
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return -EINVAL;
+
+ current->ptd[key] = value;
+
+ return 0;
+}
+
+void fastcall *ipipe_get_ptd (int key)
+
+{
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return NULL;
+
+ return current->ptd[key];
+}
+
+EXPORT_SYMBOL(ipipe_register_domain);
+EXPORT_SYMBOL(ipipe_unregister_domain);
+EXPORT_SYMBOL(ipipe_free_virq);
+EXPORT_SYMBOL(ipipe_init_attr);
+EXPORT_SYMBOL(ipipe_catch_event);
+EXPORT_SYMBOL(ipipe_alloc_ptdkey);
+EXPORT_SYMBOL(ipipe_free_ptdkey);
+EXPORT_SYMBOL(ipipe_set_ptd);
+EXPORT_SYMBOL(ipipe_get_ptd);
+EXPORT_SYMBOL(ipipe_set_irq_affinity);
+EXPORT_SYMBOL(ipipe_send_ipi);
+EXPORT_SYMBOL(__ipipe_schedule_irq);
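
Finally, a small sketch of the per-task data (PTD) interface implemented
above; the key holder and the attached state are illustrative only:

	static int my_ptd_key = -1;
	static int some_state;

	static int my_ptd_setup(void)
	{
		my_ptd_key = ipipe_alloc_ptdkey();
		if (my_ptd_key < 0)
			return my_ptd_key;	/* no key left */

		/* Attach opaque state to the current task. */
		return ipipe_set_ptd(my_ptd_key, &some_state);
	}

	/* Elsewhere, in the same task's context:
	 * void *p = ipipe_get_ptd(my_ptd_key); */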
diff -Nru linux-2.6.14/kernel/ipipe/Kconfig linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/Kconfig
--- linux-2.6.14/kernel/ipipe/Kconfig 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/Kconfig 2005-11-04 08:53:51.000000000 +0200
@@ -0,0 +1,18 @@
+config IPIPE
+ bool "Interrupt pipeline"
+ default y
+ ---help---
+ Activate this option if you want the interrupt pipeline to be
+ compiled in.
+
+config IPIPE_STATS
+ bool "Collect statistics"
+ depends on IPIPE
+ default n
+ ---help---
+ Activate this option if you want runtime statistics to be collected
+ while the I-pipe is operating. This option adds a small overhead, but
+ is useful to detect unexpected latency points.
+
+config IPIPE_EXTENDED
+ def_bool IPIPE
diff -Nru linux-2.6.14/kernel/ipipe/Makefile linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/Makefile
--- linux-2.6.14/kernel/ipipe/Makefile 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/ipipe/Makefile 2005-11-04 08:53:51.000000000 +0200
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_IPIPE) += core.o generic.o
diff -Nru linux-2.6.14/kernel/irq/handle.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/irq/handle.c
--- linux-2.6.14/kernel/irq/handle.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/irq/handle.c 2005-11-04 12:15:19.000000000 +0200
@@ -81,6 +81,17 @@
{
int ret, retval = 0, status = 0;
+#ifdef CONFIG_IPIPE
+ /*
+ * If processing a timer tick, pass the original regs as
+ * collected during preemption and not our phony (always
+ * kernel-originated) frame, so that we don't wreck the
+ * profiling code.
+ */
+ if (__ipipe_tick_irq == irq)
+ regs = __ipipe_tick_regs + smp_processor_id();
+#endif /* CONFIG_IPIPE */
+
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
@@ -117,14 +128,18 @@
/*
* No locking required for CPU-local interrupts:
*/
+#ifndef CONFIG_IPIPE
desc->handler->ack(irq);
+#endif /* CONFIG_IPIPE */
action_ret = handle_IRQ_event(irq, regs, desc->action);
desc->handler->end(irq);
return 1;
}
spin_lock(&desc->lock);
+#ifndef CONFIG_IPIPE
desc->handler->ack(irq);
+#endif /* CONFIG_IPIPE */
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
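
The two #ifndef CONFIG_IPIPE guards above drop the hardware acknowledge
from the generic handler: under the pipeline, the PIC has already been
acked at interrupt entry, before the IRQ was logged for the domains, so
acking again from the Linux handler would be wrong. Roughly (the
dispatch helper name below is an assumption; the real trampoline lives
in the arch-specific files of this patch):

	/* Hypothetical outline of the pipelined entry path. */
	void pipeline_irq_entry(unsigned irq, struct pt_regs *regs)
	{
		irq_desc[irq].handler->ack(irq); /* single ack, at entry */
		__ipipe_dispatch_irq(irq);	 /* assumed name: log the IRQ,
						    then walk domains by
						    decreasing priority */
		/* the root domain later runs __do_IRQ(), which must not
		   ack a second time; hence the #ifndef above */
	}
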
diff -Nru linux-2.6.14/kernel/Makefile linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/Makefile
--- linux-2.6.14/kernel/Makefile 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/Makefile 2005-11-04 12:14:18.000000000 +0200
@@ -32,6 +32,7 @@
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SECCOMP) += seccomp.o
+obj-$(CONFIG_IPIPE) += ipipe/
ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <[EMAIL PROTECTED]>, the -fno-omit-frame-pointer is
diff -Nru linux-2.6.14/kernel/printk.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/printk.c
--- linux-2.6.14/kernel/printk.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/printk.c 2005-12-06 12:24:58.000000000 +0200
@@ -507,6 +507,78 @@
* is inspected when the actual printing occurs.
*/
+#ifdef CONFIG_IPIPE
+
+static ipipe_spinlock_t __ipipe_printk_lock = IPIPE_SPIN_LOCK_UNLOCKED;
+
+static int __ipipe_printk_fill;
+
+static char __ipipe_printk_buf[__LOG_BUF_LEN];
+
+void __ipipe_flush_printk (unsigned virq)
+{
+ char *p = __ipipe_printk_buf;
+ int len, lmax, out = 0;
+ unsigned long flags;
+
+ goto start;
+
+ do {
+ spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);
+ start:
+ lmax = __ipipe_printk_fill;
+ while (out < lmax) {
+ len = strlen(p) + 1;
+ printk("%s",p);
+ p += len;
+ out += len;
+ }
+ spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);
+ }
+ while (__ipipe_printk_fill != lmax);
+
+ __ipipe_printk_fill = 0;
+
+ spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);
+}
+
+asmlinkage int printk(const char *fmt, ...)
+{
+ int r, fbytes, oldcount;
+ unsigned long flags;
+ va_list args;
+
+ va_start(args, fmt);
+
+ if (ipipe_current_domain == ipipe_root_domain ||
+ test_bit(IPIPE_SPRINTK_FLAG,&ipipe_current_domain->flags) ||
+ oops_in_progress) {
+ r = vprintk(fmt, args);
+ goto out;
+ }
+
+ spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);
+
+ oldcount = __ipipe_printk_fill;
+ fbytes = __LOG_BUF_LEN - oldcount;
+
+ if (fbytes > 1) {
+ r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill,
+ fbytes, fmt, args) + 1; /* account for the null byte */
+ __ipipe_printk_fill += r;
+ } else
+ r = 0;
+
+ spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);
+
+ if (oldcount == 0)
+ ipipe_trigger_irq(__ipipe_printk_virq);
+out:
+ va_end(args);
+
+ return r;
+}
+#else /* !CONFIG_IPIPE */
asmlinkage int printk(const char *fmt, ...)
{
va_list args;
@@ -518,6 +590,7 @@
return r;
}
+#endif /* CONFIG_IPIPE */
/* cpu currently holding logbuf_lock */
static volatile unsigned int printk_cpu = UINT_MAX;
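
Net effect of the printk rework: calls from the root domain (or with
IPIPE_SPRINTK_FLAG set, or while an oops is in progress) go straight to
vprintk(); calls from any other domain are staged in __ipipe_printk_buf
and replayed later by __ipipe_flush_printk(), which runs as a virtual
IRQ handler in the root domain. The setup half is not in this hunk; a
sketch of what it presumably looks like in the I-pipe core (the
allocator and the virtualization call, including its argument layout,
are assumptions here):

	/* Assumed wiring, normally done in kernel/ipipe/core.c. */
	__ipipe_printk_virq = ipipe_alloc_virq();
	ipipe_virtualize_irq(ipipe_root_domain, __ipipe_printk_virq,
			     &__ipipe_flush_printk, NULL,
			     IPIPE_HANDLE_MASK);
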
diff -Nru linux-2.6.14/kernel/sched.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/sched.c
--- linux-2.6.14/kernel/sched.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/sched.c 2005-11-04 12:16:21.000000000 +0200
@@ -3010,6 +3010,8 @@
prepare_task_switch(rq, next);
prev = context_switch(rq, prev, next);
barrier();
+ if (task_hijacked(prev))
+ return;
/*
* this_rq must be evaluated again because prev may have moved
* CPUs since it called schedule(), thus the 'rq' on its stack
@@ -3042,6 +3044,11 @@
struct task_struct *task = current;
int saved_lock_depth;
#endif
+#ifdef CONFIG_IPIPE
+ /* Do not reschedule over non-Linux domains. */
+ if (ipipe_current_domain != ipipe_root_domain)
+ return;
+#endif /* CONFIG_IPIPE */
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
@@ -3670,6 +3677,7 @@
deactivate_task(p, rq);
oldprio = p->prio;
__setscheduler(p, policy, param->sched_priority);
+ ipipe_setsched_notify(p);
if (array) {
__activate_task(p, rq);
/*
@@ -5647,3 +5655,53 @@
}
#endif
+
+#ifdef CONFIG_IPIPE
+
+int ipipe_setscheduler_root (struct task_struct *p, int policy, int prio)
+{
+ prio_array_t *array;
+ unsigned long flags;
+ runqueue_t *rq;
+ int oldprio;
+
+ if (prio < 1 || prio > MAX_RT_PRIO-1)
+ return -EINVAL;
+
+ rq = task_rq_lock(p, &flags);
+ array = p->array;
+ if (array)
+ deactivate_task(p, rq);
+ oldprio = p->prio;
+ __setscheduler(p, policy, prio);
+ if (array) {
+ __activate_task(p, rq);
+ if (task_running(rq, p)) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else if (TASK_PREEMPTS_CURR(p, rq))
+ resched_task(rq->curr);
+ }
+ task_rq_unlock(rq, &flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(ipipe_setscheduler_root);
+
+int ipipe_reenter_root (struct task_struct *prev, int policy, int prio)
+{
+ finish_task_switch(this_rq(), prev);
+ if (reacquire_kernel_lock(current) < 0)
+ ; /* return value deliberately ignored */
+ preempt_enable_no_resched();
+
+ if (current->policy != policy || current->rt_priority != prio)
+ return ipipe_setscheduler_root(current,policy,prio);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(ipipe_reenter_root);
+
+#endif /* CONFIG_IPIPE */
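
ipipe_setscheduler_root() is essentially sched_setscheduler() stripped
of the permission and policy checks, so a co-kernel can retune a task's
root-domain priority from its own context; ipipe_reenter_root() lets a
task switched in by another domain complete the Linux half of the
context switch. A minimal caller sketch (the surrounding co-kernel
logic is hypothetical; only the ipipe_* call comes from this patch):

	static void boost_in_root(struct task_struct *p, int rtprio)
	{
		/* valid range is 1..MAX_RT_PRIO-1, else -EINVAL */
		if (ipipe_setscheduler_root(p, SCHED_FIFO, rtprio))
			printk(KERN_WARNING "bad rtprio %d\n", rtprio);
	}
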
diff -Nru linux-2.6.14/kernel/signal.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/signal.c
--- linux-2.6.14/kernel/signal.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/kernel/signal.c 2005-11-04 12:16:34.000000000 +0200
@@ -601,6 +601,7 @@
unsigned int mask;
set_tsk_thread_flag(t, TIF_SIGPENDING);
+ ipipe_sigwake_notify(t); /* TIF_SIGPENDING must be set first. */
/*
* For SIGKILL, we want to wake it up in the stopped/traced case.
diff -Nru linux-2.6.14/lib/smp_processor_id.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/lib/smp_processor_id.c
--- linux-2.6.14/lib/smp_processor_id.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/lib/smp_processor_id.c 2005-11-04 12:16:53.000000000 +0200
@@ -12,6 +12,11 @@
int this_cpu = raw_smp_processor_id();
cpumask_t this_mask;
+#ifdef CONFIG_IPIPE
+ if (ipipe_current_domain != ipipe_root_domain)
+ return this_cpu;
+#endif /* CONFIG_IPIPE */
+
if (likely(preempt_count))
goto out;
diff -Nru linux-2.6.14/lib/spinlock_debug.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/lib/spinlock_debug.c
--- linux-2.6.14/lib/spinlock_debug.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/lib/spinlock_debug.c 2005-11-04 12:17:20.000000000 +0200
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/module.h>
static void spin_bug(spinlock_t *lock, const char *msg)
{
@@ -93,6 +94,8 @@
debug_spin_lock_after(lock);
}
+EXPORT_SYMBOL(_raw_spin_lock);
+
int _raw_spin_trylock(spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
@@ -108,12 +111,16 @@
return ret;
}
+EXPORT_SYMBOL(_raw_spin_trylock);
+
void _raw_spin_unlock(spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
}
+EXPORT_SYMBOL(_raw_spin_unlock);
+
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
static long print_once = 1;
@@ -162,6 +169,8 @@
__read_lock_debug(lock);
}
+EXPORT_SYMBOL(_raw_read_lock);
+
int _raw_read_trylock(rwlock_t *lock)
{
int ret = __raw_read_trylock(&lock->raw_lock);
@@ -175,12 +184,16 @@
return ret;
}
+EXPORT_SYMBOL(_raw_read_trylock);
+
void _raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
__raw_read_unlock(&lock->raw_lock);
}
+EXPORT_SYMBOL(_raw_read_unlock);
+
static inline void debug_write_lock_before(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
@@ -235,6 +248,8 @@
debug_write_lock_after(lock);
}
+EXPORT_SYMBOL(_raw_write_lock);
+
int _raw_write_trylock(rwlock_t *lock)
{
int ret = __raw_write_trylock(&lock->raw_lock);
@@ -250,8 +265,12 @@
return ret;
}
+EXPORT_SYMBOL(_raw_write_trylock);
+
void _raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
__raw_write_unlock(&lock->raw_lock);
}
+
+EXPORT_SYMBOL(_raw_write_unlock);
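
The EXPORT_SYMBOL additions above only matter with CONFIG_DEBUG_SPINLOCK
enabled: spin_lock() and friends then expand to calls into these
out-of-line _raw_* helpers, and modules (such as the Xenomai ones this
patch targets) would otherwise fail to link. Any plain module code
exercises them:

	/* Standard module code; with CONFIG_DEBUG_SPINLOCK the calls
	   below resolve to the exported _raw_spin_lock/_raw_spin_unlock. */
	static DEFINE_SPINLOCK(my_lock);

	spin_lock(&my_lock);
	/* ... critical section ... */
	spin_unlock(&my_lock);
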
diff -Nru linux-2.6.14/mm/vmalloc.c linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/mm/vmalloc.c
--- linux-2.6.14/mm/vmalloc.c 2005-10-28 03:02:08.000000000 +0300
+++ linux-2.6.14-adeos-ipipe-ppc64-0.9-02-dev/mm/vmalloc.c 2005-11-04 12:17:51.000000000 +0200
@@ -18,6 +18,7 @@
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
DEFINE_RWLOCK(vmlist_lock);
@@ -148,10 +149,14 @@
pgd = pgd_offset_k(addr);
spin_lock(&init_mm.page_table_lock);
do {
+ pgd_t oldpgd;
+ memcpy(&oldpgd,pgd,sizeof(pgd_t));
next = pgd_addr_end(addr, end);
err = vmap_pud_range(pgd, addr, next, prot, pages);
if (err)
break;
+ if (pgd_val(oldpgd) != pgd_val(*pgd))
+ set_pgdir(addr, *pgd);
} while (pgd++, addr = next, addr != end);
spin_unlock(&init_mm.page_table_lock);
flush_cache_vmap((unsigned long) area->addr, end);
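
The vmalloc change above snapshots each top-level entry before
populating it and calls set_pgdir() (presumably provided by the arch
side, via the asm/pgalloc.h include added earlier) whenever
vmap_pud_range() installed a new one. The point is to propagate fresh
kernel mappings eagerly: with the pipeline, a vmalloc'ed area may be
touched from a context where taking the usual lazy vmalloc fault is
unsafe. The pattern in isolation:

	pgd_t oldpgd = *pgd;			/* snapshot */
	err = vmap_pud_range(pgd, addr, next, prot, pages);
	if (!err && pgd_val(oldpgd) != pgd_val(*pgd))
		set_pgdir(addr, *pgd);		/* replicate the new entry */
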