From: Wolfgang Grandegger <[email protected]>

The IBM-EMACS driver requires a separate Linux kernel patch implementing
a common real-time capable Memory Access Layer (MAL) and providing
direct access to the PHY interface from the corresponding Linux driver.

Signed-off-by: Wolfgang Grandegger <[email protected]>
---
 drivers/ibm_newemac/README                         |   32 +
 .../ibm_newemac/linux-2.6.36.4-rtdm-ibm-emac.patch |  875 ++++++++++++++++++++
 .../ibm_newemac/linux-3.0.4-rtdm-ibm-emac.patch    |  875 ++++++++++++++++++++
 3 files changed, 1782 insertions(+), 0 deletions(-)
 create mode 100644 drivers/ibm_newemac/README
 create mode 100644 drivers/ibm_newemac/linux-2.6.36.4-rtdm-ibm-emac.patch
 create mode 100644 drivers/ibm_newemac/linux-3.0.4-rtdm-ibm-emac.patch

diff --git a/drivers/ibm_newemac/README b/drivers/ibm_newemac/README
new file mode 100644
index 0000000..99e991e
--- /dev/null
+++ b/drivers/ibm_newemac/README
@@ -0,0 +1,32 @@
+This RTnet driver is for the EMAC Ethernet controllers on AMCC 4xx
+processors. It requires a *separate* Linux kernel patch to provide a
+real-time capable memory access layer (MAL). See chapter "Technical
+comments" below for further information. At the time of writing, the
+following patches are available in this directory:
+
+- linux-2.6.36.4-rtdm-ibm-emac.patch
+- linux-3.0.4-rtdm-ibm-emac.patch
+
+They also fix a cleanup issue to allow re-binding of the driver. After
+applying the patch, you need to enable "CONFIG_IBM_NEW_EMAC_MAL_RTDM".
+If you use RTnet and Linux networking drivers concurrently on different
+EMAC controllers, you need to unbind the Linux driver from the device
+before loading the RTnet EMAC driver:
+
+# echo "1ef600f00.ethernet" > \
+     /sys/bus/platform/devices/1ef600f00.ethernet/driver/unbind
+# insmod rt_ibm_emac.ko
+
+Technical comments:
+------------------
+
+The two IBM EMACs on the AMCC 44x processors share a common memory
+access layer (MAL) for transferring packets from the EMAC to the
+memory and vice versa. Unfortunately, there is only one interrupt
+line for RX and TX, which requires handling these interrupts in the
+real-time context. This means that the two EMACs influence each
+other to some extent, e.g. heavy traffic on the EMAC used for Linux
+networking affects the other EMAC used for RTnet traffic.
+So far, I did not see real problems. The maximum round-trip time
+measured with "rtt-sender <-> rtt-responder" did not exceed 250us,
+even under heavy packet storms provoked on the EMAC used for Linux.
diff --git a/drivers/ibm_newemac/linux-2.6.36.4-rtdm-ibm-emac.patch 
b/drivers/ibm_newemac/linux-2.6.36.4-rtdm-ibm-emac.patch
new file mode 100644
index 0000000..bafc780
--- /dev/null
+++ b/drivers/ibm_newemac/linux-2.6.36.4-rtdm-ibm-emac.patch
@@ -0,0 +1,875 @@
+From 7b2172814c8a98d44f963159063707a391c74085 Mon Sep 17 00:00:00 2001
+From: Wolfgang Grandegger <[email protected]>
+Date: Thu, 17 Nov 2011 13:11:31 +0100
+Subject: [PATCH] net/ibm_newemac: provide real-time capable RTDM MAL driver
+
+Signed-off-by: Wolfgang Grandegger <[email protected]>
+---
+ drivers/net/ibm_newemac/Kconfig  |    5 +
+ drivers/net/ibm_newemac/Makefile |    4 +
+ drivers/net/ibm_newemac/core.c   |    5 +
+ drivers/net/ibm_newemac/mal.c    |  358 ++++++++++++++++++++++++++++++++++---
+ drivers/net/ibm_newemac/mal.h    |   27 +++
+ drivers/net/ibm_newemac/phy.c    |    7 +
+ drivers/net/ibm_newemac/rgmii.c  |    8 +
+ drivers/net/ibm_newemac/zmii.c   |    9 +
+ 8 files changed, 394 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
+index 78a1628..b5696e1 100644
+--- a/drivers/net/ibm_newemac/Kconfig
++++ b/drivers/net/ibm_newemac/Kconfig
+@@ -39,6 +39,11 @@ config IBM_NEW_EMAC_RX_SKB_HEADROOM
+
+         If unsure, set to 0.
+
++config IBM_NEW_EMAC_MAL_RTDM
++      bool "Real-time MAL"
++      depends on IBM_NEW_EMAC && XENO_SKIN_RTDM
++      default n
++
+ config IBM_NEW_EMAC_DEBUG
+       bool "Debugging"
+       depends on IBM_NEW_EMAC
+diff --git a/drivers/net/ibm_newemac/Makefile 
b/drivers/net/ibm_newemac/Makefile
+index 0b5c995..ab82084 100644
+--- a/drivers/net/ibm_newemac/Makefile
++++ b/drivers/net/ibm_newemac/Makefile
+@@ -2,6 +2,10 @@
+ # Makefile for the PowerPC 4xx on-chip ethernet driver
+ #
+
++ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXTRA_CFLAGS += -D__IN_XENOMAI__ -Iinclude/xenomai
++endif
++
+ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac.o
+
+ ibm_newemac-y := mal.o core.o phy.o
+diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
+index 519e19e..55136b8 100644
+--- a/drivers/net/ibm_newemac/core.c
++++ b/drivers/net/ibm_newemac/core.c
+@@ -2960,6 +2960,9 @@ static int __devexit emac_remove(struct platform_device 
*ofdev)
+       if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+               zmii_detach(dev->zmii_dev, dev->zmii_port);
+
++      busy_phy_map &= ~(1 << dev->phy.address);
++      DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
++
+       mal_unregister_commac(dev->mal, &dev->commac);
+       emac_put_deps(dev);
+
+@@ -3108,3 +3111,5 @@ static void __exit emac_exit(void)
+
+ module_init(emac_init);
+ module_exit(emac_exit);
++
++EXPORT_SYMBOL_GPL(busy_phy_map);
+diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
+index d5717e2..75f04b1 100644
+--- a/drivers/net/ibm_newemac/mal.c
++++ b/drivers/net/ibm_newemac/mal.c
+@@ -18,6 +18,10 @@
+  *      Armin Kuster <[email protected]>
+  *      Copyright 2002 MontaVista Softare Inc.
+  *
++ * Real-time extension required for the RTnet IBM EMAC driver
++ *
++ * Copyright 2011 Wolfgang Grandegger <[email protected]>
++ *
+  * This program is free software; you can redistribute  it and/or modify it
+  * under  the terms of  the GNU General  Public License as published by the
+  * Free Software Foundation;  either version 2 of the  License, or (at your
+@@ -31,6 +35,27 @@
+ #include "core.h"
+ #include <asm/dcr-regs.h>
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#define MAL_IRQ_HANDLED RTDM_IRQ_HANDLED
++#define mal_spin_lock_irqsave(lock, flags) \
++      do { rtdm_lock_get_irqsave(lock, flags); } while (0)
++#define mal_spin_unlock_irqrestore(lock, flags) \
++      do { rtdm_lock_put_irqrestore(lock, flags); } while (0)
++#define mal_spin_lock_init(lock) \
++      do { rtdm_lock_init(lock); } while (0)
++static DEFINE_RTDM_RATELIMIT_STATE(mal_net_ratelimit_state, 5000000000LL, 10);
++#define mal_net_ratelimit() rtdm_ratelimit(&mal_net_ratelimit_state, __func__)
++#else
++#define MAL_IRQ_HANDLED IRQ_HANDLED
++#define mal_spin_lock_irqsave(lock, flags) \
++      do { spin_lock_irqsave(lock, flags);; } while (0)
++#define mal_spin_unlock_irqrestore(lock, flags) \
++      do { spin_unlock_irqrestore(lock, flags); } while (0)
++#define mal_spin_lock_init(lock) \
++      do { spin_lock_init(lock); } while (0)
++#define mal_net_ratelimit() net_ratelimit()
++#endif
++
+ static int mal_count;
+
+ int __devinit mal_register_commac(struct mal_instance *mal,
+@@ -38,27 +63,49 @@ int __devinit mal_register_commac(struct mal_instance      
*mal,
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
+-
++      mal_spin_lock_irqsave(&mal->lock, flags);
+       MAL_DBG(mal, "reg(%08x, %08x)" NL,
+               commac->tx_chan_mask, commac->rx_chan_mask);
+
+       /* Don't let multiple commacs claim the same channel(s) */
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (((mal->tx_chan_mask | mal->tx_chan_mask_rtdm) &
++           commac->tx_chan_mask) ||
++          ((mal->rx_chan_mask | mal->rx_chan_mask_rtdm) &
++           commac->rx_chan_mask)) {
++              mal_spin_unlock_irqrestore(&mal->lock, flags);
++              printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
++                     mal->index);
++              return -EBUSY;
++      }
++#else
+       if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
+           (mal->rx_chan_mask & commac->rx_chan_mask)) {
+-              spin_unlock_irqrestore(&mal->lock, flags);
++              mal_spin_unlock_irqrestore(&mal->lock, flags);
+               printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
+                      mal->index);
+               return -EBUSY;
+       }
++#endif
+
+       if (list_empty(&mal->list))
+               napi_enable(&mal->napi);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              mal->tx_chan_mask_rtdm |= commac->tx_chan_mask;
++              mal->rx_chan_mask_rtdm |= commac->rx_chan_mask;
++      } else {
++              mal->tx_chan_mask |= commac->tx_chan_mask;
++              mal->rx_chan_mask |= commac->rx_chan_mask;
++      }
++#else
+       mal->tx_chan_mask |= commac->tx_chan_mask;
+       mal->rx_chan_mask |= commac->rx_chan_mask;
++#endif
+       list_add(&commac->list, &mal->list);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+
+       return 0;
+ }
+@@ -68,18 +115,28 @@ void mal_unregister_commac(struct mal_instance    *mal,
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "unreg(%08x, %08x)" NL,
+               commac->tx_chan_mask, commac->rx_chan_mask);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              mal->tx_chan_mask_rtdm &= ~commac->tx_chan_mask;
++              mal->rx_chan_mask_rtdm &= ~commac->rx_chan_mask;
++      } else {
++              mal->tx_chan_mask &= ~commac->tx_chan_mask;
++              mal->rx_chan_mask &= ~commac->rx_chan_mask;
++      }
++#else
+       mal->tx_chan_mask &= ~commac->tx_chan_mask;
+       mal->rx_chan_mask &= ~commac->rx_chan_mask;
++#endif
+       list_del_init(&commac->list);
+       if (list_empty(&mal->list))
+               napi_disable(&mal->napi);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
+@@ -117,14 +174,14 @@ void mal_enable_tx_channel(struct mal_instance *mal, int 
channel)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "enable_tx(%d)" NL, channel);
+
+       set_mal_dcrn(mal, MAL_TXCASR,
+                    get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_disable_tx_channel(struct mal_instance *mal, int channel)
+@@ -146,14 +203,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int 
channel)
+       if (!(channel % 8))
+               channel >>= 3;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "enable_rx(%d)" NL, channel);
+
+       set_mal_dcrn(mal, MAL_RXCASR,
+                    get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_disable_rx_channel(struct mal_instance *mal, int channel)
+@@ -175,29 +232,36 @@ void mal_poll_add(struct mal_instance *mal, struct 
mal_commac *commac)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "poll_add(%p)" NL, commac);
+
+       /* starts disabled */
+       set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm)
++              list_add_tail(&commac->poll_list, &mal->poll_list_rtdm);
++      else
++              list_add_tail(&commac->poll_list, &mal->poll_list);
++#else
+       list_add_tail(&commac->poll_list, &mal->poll_list);
++#endif
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "poll_del(%p)" NL, commac);
+
+       list_del(&commac->poll_list);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ /* synchronized by mal_poll() */
+@@ -218,9 +282,18 @@ static inline void mal_disable_eob_irq(struct 
mal_instance *mal)
+       MAL_DBG2(mal, "disable_irq" NL);
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_serr(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_serr(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
+       u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
+@@ -234,51 +307,99 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
+                       /* We ignore Descriptor error,
+                        * TXDE or RXDE interrupt will be generated anyway.
+                        */
+-                      return IRQ_HANDLED;
++                      return MAL_IRQ_HANDLED;
+               }
+
+               if (esr & MAL_ESR_PEIN) {
+                       /* PLB error, it's probably buggy hardware or
+                        * incorrect physical address in BD (i.e. bug)
+                        */
+-                      if (net_ratelimit())
++                      if (mal_net_ratelimit())
+                               printk(KERN_ERR
+                                      "mal%d: system error, "
+                                      "PLB (ESR = 0x%08x)\n",
+                                      mal->index, esr);
+-                      return IRQ_HANDLED;
++                      return MAL_IRQ_HANDLED;
+               }
+
+               /* OPB error, it's probably buggy hardware or incorrect
+                * EBC setup
+                */
+-              if (net_ratelimit())
++              if (mal_net_ratelimit())
+                       printk(KERN_ERR
+                              "mal%d: system error, OPB (ESR = 0x%08x)\n",
+                              mal->index, esr);
+       }
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++void mal_schedule_poll_nrt(rtdm_nrtsig_t nrt_sig, void* data)
++{
++      struct mal_instance *mal = (struct mal_instance *)data;
++      unsigned long flags;
++
++      local_irq_save(flags);
++      if (likely(napi_schedule_prep(&mal->napi))) {
++              MAL_DBG2(mal, "schedule_poll" NL);
++              __napi_schedule(&mal->napi);
++      } else
++              MAL_DBG2(mal, "already in poll" NL);
++      local_irq_restore(flags);
++}
++#endif
+ static inline void mal_schedule_poll(struct mal_instance *mal)
+ {
+       if (likely(napi_schedule_prep(&mal->napi))) {
+               MAL_DBG2(mal, "schedule_poll" NL);
++#ifndef CONFIG_IBM_NEW_EMAC_MAL_RTDM
+               mal_disable_eob_irq(mal);
++#endif
+               __napi_schedule(&mal->napi);
+       } else
+               MAL_DBG2(mal, "already in poll" NL);
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#ifdef OBSOLETE
++static nanosecs_abs_t tstart;
++#endif
++
++static int mal_txeob(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_txeob(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
++      struct list_head *l;
+       u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
+
+-      MAL_DBG2(mal, "txeob %08x" NL, r);
++      MAL_DBG2(mal, "rt txeob %08x" NL, r);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (r & mal->tx_chan_mask_rtdm) {
++              /* Process TX skbs */
++              list_for_each(l, &mal->poll_list_rtdm) {
++                      struct mal_commac *mc =
++                              list_entry(l, struct mal_commac, poll_list);
++#ifdef OBSOLETE
++                      tstart = rtdm_clock_read();
++#endif
++                      mc->ops->poll_tx(mc->dev);
++              }
++      }
++      if (r & mal->tx_chan_mask)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
++
+       set_mal_dcrn(mal, MAL_TXEOBISR, r);
+
+ #ifdef CONFIG_PPC_DCR_NATIVE
+@@ -287,18 +408,49 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
+                               (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | 
ICINTSTAT_ICTX));
+ #endif
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_rxeob(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
++      struct list_head *l;
++      u32 r;
+
+-      u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      mal->time_stamp = rtdm_clock_read();
++#endif
++      r = get_mal_dcrn(mal, MAL_RXEOBISR);
+
+       MAL_DBG2(mal, "rxeob %08x" NL, r);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (r & mal->rx_chan_mask_rtdm) {
++              list_for_each(l, &mal->poll_list_rtdm) {
++                      struct mal_commac *mc =
++                              list_entry(l, struct mal_commac, poll_list);
++                      if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED,
++                                            &mc->flags))) {
++                              MAL_DBG(mal, "mc->flags=%#lx\n", mc->flags);
++                              continue;
++                      }
++                      mc->ops->poll_rx(mc->dev, 1024);
++              }
++      }
++      if (r & mal->rx_chan_mask)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
+       set_mal_dcrn(mal, MAL_RXEOBISR, r);
+
+ #ifdef CONFIG_PPC_DCR_NATIVE
+@@ -307,76 +459,149 @@ static irqreturn_t mal_rxeob(int irq, void 
*dev_instance)
+                               (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | 
ICINTSTAT_ICRX));
+ #endif
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_txde(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_txde(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
+       u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
+       set_mal_dcrn(mal, MAL_TXDEIR, deir);
+
+       MAL_DBG(mal, "txde %08x" NL, deir);
+
+-      if (net_ratelimit())
++      if (mal_net_ratelimit())
+               printk(KERN_ERR
+                      "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
+                      mal->index, deir);
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_rxde(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_rxde(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++      int nrtsig_pend = 0;
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+       struct list_head *l;
+
+       u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      mal->time_stamp = rtdm_clock_read();
++#endif
+       MAL_DBG(mal, "rxde %08x" NL, deir);
+
+       list_for_each(l, &mal->list) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++              if (deir & mc->rx_chan_mask) {
++                      set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
++                      mc->ops->rxde(mc->dev);
++                      if (mc->rtdm) {
++                              mc->ops->poll_tx(mc->dev);
++                              mc->ops->poll_rx(mc->dev, 1024);
++                      } else {
++                              nrtsig_pend++;
++                      }
++              }
++#else
+               if (deir & mc->rx_chan_mask) {
+                       set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
+                       mc->ops->rxde(mc->dev);
+               }
++#endif
+       }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (nrtsig_pend)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
+       set_mal_dcrn(mal, MAL_RXDEIR, deir);
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_int(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_int(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+       u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
++      MAL_DBG(mal, "int %08x" NL, esr);
++
+       if (esr & MAL_ESR_EVB) {
+               /* descriptor error */
+               if (esr & MAL_ESR_DE) {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      if (esr & MAL_ESR_CIDT)
++                              return mal_rxde(irq_handle);
++                      else
++                              return mal_txde(irq_handle);
++#else
+                       if (esr & MAL_ESR_CIDT)
+                               return mal_rxde(irq, dev_instance);
+                       else
+                               return mal_txde(irq, dev_instance);
++#endif
+               } else { /* SERR */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      return mal_serr(irq_handle);
++#else
+                       return mal_serr(irq, dev_instance);
++#endif
+               }
+       }
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
+ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
++      } else {
++              while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED,
++                                      &commac->flags))
++                      msleep(1);
++              napi_synchronize(&mal->napi);
++      }
++#else
+       /* Spinlock-type semantics: only one caller disable poll at a time */
+       while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
+               msleep(1);
+
+       /* Synchronize with the MAL NAPI poller */
+       napi_synchronize(&mal->napi);
++#endif
+ }
+
+ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
+@@ -389,7 +614,12 @@ void mal_poll_enable(struct mal_instance *mal, struct 
mal_commac *commac)
+        * probably be delayed until the next interrupt but that's mostly a
+        * non-issue in the context where this is called.
+        */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (!commac->rtdm)
++              napi_schedule(&mal->napi);
++#else
+       napi_schedule(&mal->napi);
++#endif
+ }
+
+ static int mal_poll(struct napi_struct *napi, int budget)
+@@ -429,10 +659,15 @@ static int mal_poll(struct napi_struct *napi, int budget)
+       }
+
+       /* We need to disable IRQs to protect from RXDE IRQ here */
+-      spin_lock_irqsave(&mal->lock, flags);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      local_irq_save(flags);
+       __napi_complete(napi);
++      local_irq_restore(flags);
++#else
++      spin_lock_irqsave(&mal->lock, flags);
+       mal_enable_eob_irq(mal);
+       spin_unlock_irqrestore(&mal->lock, flags);
++#endif
+
+       /* Check for "rotting" packet(s) */
+       list_for_each(l, &mal->poll_list) {
+@@ -443,10 +678,15 @@ static int mal_poll(struct napi_struct *napi, int budget)
+               if (unlikely(mc->ops->peek_rx(mc->dev) ||
+                            test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
+                       MAL_DBG2(mal, "rotting packet" NL);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      if (!napi_reschedule(napi))
++                              MAL_DBG2(mal, "already in poll list" NL);
++#else
+                       if (napi_reschedule(napi))
+                               mal_disable_eob_irq(mal);
+                       else
+                               MAL_DBG2(mal, "already in poll list" NL);
++#endif
+
+                       if (budget > 0)
+                               goto again;
+@@ -527,7 +767,11 @@ static int __devinit mal_probe(struct platform_device 
*ofdev,
+       const u32 *prop;
+       u32 cfg;
+       unsigned long irqflags;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
++#else
+       irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
++#endif
+
+       mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+       if (!mal) {
+@@ -612,7 +856,18 @@ static int __devinit mal_probe(struct platform_device 
*ofdev,
+
+       INIT_LIST_HEAD(&mal->poll_list);
+       INIT_LIST_HEAD(&mal->list);
+-      spin_lock_init(&mal->lock);
++      mal_spin_lock_init(&mal->lock);
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      INIT_LIST_HEAD(&mal->poll_list_rtdm);
++
++      if (rtdm_nrtsig_init(&mal->schedule_poll_nrt, mal_schedule_poll_nrt,
++                           (void*)mal)) {
++              printk(KERN_ERR
++                     "mal%d: couldn't init mal schedule handler !\n", index);
++              goto fail_unmap;
++      }
++#endif
+
+       init_dummy_netdev(&mal->dummy_dev);
+
+@@ -674,19 +929,44 @@ static int __devinit mal_probe(struct platform_device 
*ofdev,
+               hdlr_rxde = mal_rxde;
+       }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->serr_irq_handle, mal->serr_irq,
++                             mal_serr, 0, "MAL SERR", mal);
++#else
+       err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
++#endif
+       if (err)
+               goto fail2;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->txde_irq_handle, mal->txde_irq,
++                             mal_txde, 0, "MAL TX DE", mal);
++#else
+       err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
++#endif
+       if (err)
+               goto fail3;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->txeob_irq_handle, mal->txeob_irq,
++                             mal_txeob, 0, "MAL TX EOB", mal);
++#else
+       err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
++#endif
+       if (err)
+               goto fail4;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->rxde_irq_handle, mal->rxde_irq,
++                             mal_rxde, 0, "MAL RX DE", mal);
++#else
+       err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
++#endif
+       if (err)
+               goto fail5;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->rxeob_irq_handle, mal->rxeob_irq,
++                             mal_rxeob, 0, "MAL RX EOB", mal);
++#else
+       err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
++#endif
+       if (err)
+               goto fail6;
+
+@@ -715,7 +995,11 @@ static int __devinit mal_probe(struct platform_device 
*ofdev,
+  fail6:
+       free_irq(mal->rxde_irq, mal);
+  fail5:
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_free(&mal->txeob_irq_handle);
++#else
+       free_irq(mal->txeob_irq, mal);
++#endif
+  fail4:
+       free_irq(mal->txde_irq, mal);
+  fail3:
+@@ -808,3 +1092,19 @@ void mal_exit(void)
+ {
+       of_unregister_platform_driver(&mal_of_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(mal_register_commac);
++EXPORT_SYMBOL_GPL(mal_unregister_commac);
++EXPORT_SYMBOL_GPL(mal_set_rcbs);
++EXPORT_SYMBOL_GPL(mal_tx_bd_offset);
++EXPORT_SYMBOL_GPL(mal_rx_bd_offset);
++EXPORT_SYMBOL_GPL(mal_enable_tx_channel);
++EXPORT_SYMBOL_GPL(mal_disable_tx_channel);
++EXPORT_SYMBOL_GPL(mal_enable_rx_channel);
++EXPORT_SYMBOL_GPL(mal_disable_rx_channel);
++EXPORT_SYMBOL_GPL(mal_poll_add);
++EXPORT_SYMBOL_GPL(mal_poll_del);
++EXPORT_SYMBOL_GPL(mal_poll_enable);
++EXPORT_SYMBOL_GPL(mal_poll_disable);
++#endif
+diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
+index 6608421..0573c76 100644
+--- a/drivers/net/ibm_newemac/mal.h
++++ b/drivers/net/ibm_newemac/mal.h
+@@ -24,6 +24,10 @@
+ #ifndef __IBM_NEWEMAC_MAL_H
+ #define __IBM_NEWEMAC_MAL_H
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#include <rtdm/rtdm_driver.h>
++#endif
++
+ /*
+  * There are some variations on the MAL, we express them in this driver as
+  * MAL Version 1 and 2 though that doesn't match any IBM terminology.
+@@ -186,6 +190,9 @@ struct mal_commac {
+       u32                     tx_chan_mask;
+       u32                     rx_chan_mask;
+       struct list_head        list;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      int                     rtdm;
++#endif
+ };
+
+ struct mal_instance {
+@@ -199,20 +206,40 @@ struct mal_instance {
+       int                     txde_irq;       /* TX Descriptor Error IRQ */
+       int                     rxde_irq;       /* RX Descriptor Error IRQ */
+       int                     serr_irq;       /* MAL System Error IRQ    */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_t              txeob_irq_handle;
++      rtdm_irq_t              rxeob_irq_handle;
++      rtdm_irq_t              txde_irq_handle;
++      rtdm_irq_t              rxde_irq_handle;
++      rtdm_irq_t              serr_irq_handle;
++      rtdm_nrtsig_t           schedule_poll_nrt;
++      nanosecs_abs_t          time_stamp;
++#endif
+
+       struct list_head        poll_list;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct list_head        poll_list_rtdm;
++#endif
+       struct napi_struct      napi;
+
+       struct list_head        list;
+       u32                     tx_chan_mask;
+       u32                     rx_chan_mask;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      u32                     tx_chan_mask_rtdm;
++      u32                     rx_chan_mask_rtdm;
++#endif
+
+       dma_addr_t              bd_dma;
+       struct mal_descriptor   *bd_virt;
+
+       struct platform_device  *ofdev;
+       int                     index;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_lock_t             lock;
++#else
+       spinlock_t              lock;
++#endif
+
+       struct net_device       dummy_dev;
+
+diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
+index ac9d964..87a0a80 100644
+--- a/drivers/net/ibm_newemac/phy.c
++++ b/drivers/net/ibm_newemac/phy.c
+@@ -535,4 +535,11 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
+       return 0;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(emac_mii_phy_probe);
++EXPORT_SYMBOL_GPL(emac_mii_reset_gpcs);
++EXPORT_SYMBOL_GPL(emac_mii_reset_phy);
++#endif
++
++
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
+index dd61798..9e0a673 100644
+--- a/drivers/net/ibm_newemac/rgmii.c
++++ b/drivers/net/ibm_newemac/rgmii.c
+@@ -337,3 +337,11 @@ void rgmii_exit(void)
+ {
+       of_unregister_platform_driver(&rgmii_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(rgmii_attach);
++EXPORT_SYMBOL_GPL(rgmii_detach);
++EXPORT_SYMBOL_GPL(rgmii_set_speed);
++EXPORT_SYMBOL_GPL(rgmii_get_mdio);
++EXPORT_SYMBOL_GPL(rgmii_put_mdio);
++#endif
+diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
+index 34ed6ee..130e62b 100644
+--- a/drivers/net/ibm_newemac/zmii.c
++++ b/drivers/net/ibm_newemac/zmii.c
+@@ -331,3 +331,12 @@ void zmii_exit(void)
+ {
+       of_unregister_platform_driver(&zmii_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(zmii_attach);
++EXPORT_SYMBOL_GPL(zmii_detach);
++EXPORT_SYMBOL_GPL(zmii_get_mdio);
++EXPORT_SYMBOL_GPL(zmii_put_mdio);
++EXPORT_SYMBOL_GPL(zmii_set_speed);
++#endif
++
+--
+1.7.4.1
diff --git a/drivers/ibm_newemac/linux-3.0.4-rtdm-ibm-emac.patch 
b/drivers/ibm_newemac/linux-3.0.4-rtdm-ibm-emac.patch
new file mode 100644
index 0000000..000c4f7
--- /dev/null
+++ b/drivers/ibm_newemac/linux-3.0.4-rtdm-ibm-emac.patch
@@ -0,0 +1,875 @@
+From d230f5decc7b4fb8edf783de1738312e911ab1c3 Mon Sep 17 00:00:00 2001
+From: Wolfgang Grandegger <[email protected]>
+Date: Thu, 17 Nov 2011 13:13:29 +0100
+Subject: [PATCH] net/ibm_newemac: provide real-time capable RTDM MAL driver
+
+Signed-off-by: Wolfgang Grandegger <[email protected]>
+---
+ drivers/net/ibm_newemac/Kconfig  |    5 +
+ drivers/net/ibm_newemac/Makefile |    4 +
+ drivers/net/ibm_newemac/core.c   |    5 +
+ drivers/net/ibm_newemac/mal.c    |  358 ++++++++++++++++++++++++++++++++++---
+ drivers/net/ibm_newemac/mal.h    |   27 +++
+ drivers/net/ibm_newemac/phy.c    |    7 +
+ drivers/net/ibm_newemac/rgmii.c  |    8 +
+ drivers/net/ibm_newemac/zmii.c   |    9 +
+ 8 files changed, 394 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
+index 78a1628..b5696e1 100644
+--- a/drivers/net/ibm_newemac/Kconfig
++++ b/drivers/net/ibm_newemac/Kconfig
+@@ -39,6 +39,11 @@ config IBM_NEW_EMAC_RX_SKB_HEADROOM
+
+         If unsure, set to 0.
+
++config IBM_NEW_EMAC_MAL_RTDM
++      bool "Real-time MAL"
++      depends on IBM_NEW_EMAC && XENO_SKIN_RTDM
++      default n
++
+ config IBM_NEW_EMAC_DEBUG
+       bool "Debugging"
+       depends on IBM_NEW_EMAC
+diff --git a/drivers/net/ibm_newemac/Makefile b/drivers/net/ibm_newemac/Makefile
+index 0b5c995..ab82084 100644
+--- a/drivers/net/ibm_newemac/Makefile
++++ b/drivers/net/ibm_newemac/Makefile
+@@ -2,6 +2,10 @@
+ # Makefile for the PowerPC 4xx on-chip ethernet driver
+ #
+
++ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXTRA_CFLAGS += -D__IN_XENOMAI__ -Iinclude/xenomai
++endif
++
+ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac.o
+
+ ibm_newemac-y := mal.o core.o phy.o
+diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
+index 079450f..bd45db6 100644
+--- a/drivers/net/ibm_newemac/core.c
++++ b/drivers/net/ibm_newemac/core.c
+@@ -2949,6 +2949,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
+       if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+               zmii_detach(dev->zmii_dev, dev->zmii_port);
+
++      busy_phy_map &= ~(1 << dev->phy.address);
++      DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
++
+       mal_unregister_commac(dev->mal, &dev->commac);
+       emac_put_deps(dev);
+
+@@ -3097,3 +3100,5 @@ static void __exit emac_exit(void)
+
+ module_init(emac_init);
+ module_exit(emac_exit);
++
++EXPORT_SYMBOL_GPL(busy_phy_map);
+diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
+index d268f40..9a8f76b 100644
+--- a/drivers/net/ibm_newemac/mal.c
++++ b/drivers/net/ibm_newemac/mal.c
+@@ -18,6 +18,10 @@
+  *      Armin Kuster <[email protected]>
+  *      Copyright 2002 MontaVista Softare Inc.
+  *
++ * Real-time extension required for the RTnet IBM EMAC driver
++ *
++ * Copyright 2011 Wolfgang Grandegger <[email protected]>
++ *
+  * This program is free software; you can redistribute  it and/or modify it
+  * under  the terms of  the GNU General  Public License as published by the
+  * Free Software Foundation;  either version 2 of the  License, or (at your
+@@ -31,6 +35,27 @@
+ #include "core.h"
+ #include <asm/dcr-regs.h>
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#define MAL_IRQ_HANDLED RTDM_IRQ_HANDLED
++#define mal_spin_lock_irqsave(lock, flags) \
++      do { rtdm_lock_get_irqsave(lock, flags); } while (0)
++#define mal_spin_unlock_irqrestore(lock, flags) \
++      do { rtdm_lock_put_irqrestore(lock, flags); } while (0)
++#define mal_spin_lock_init(lock) \
++      do { rtdm_lock_init(lock); } while (0)
++static DEFINE_RTDM_RATELIMIT_STATE(mal_net_ratelimit_state, 5000000000LL, 10);
++#define mal_net_ratelimit() rtdm_ratelimit(&mal_net_ratelimit_state, __func__)
++#else
++#define MAL_IRQ_HANDLED IRQ_HANDLED
++#define mal_spin_lock_irqsave(lock, flags) \
++      do { spin_lock_irqsave(lock, flags); } while (0)
++#define mal_spin_unlock_irqrestore(lock, flags) \
++      do { spin_unlock_irqrestore(lock, flags); } while (0)
++#define mal_spin_lock_init(lock) \
++      do { spin_lock_init(lock); } while (0)
++#define mal_net_ratelimit() net_ratelimit()
++#endif
++
+ static int mal_count;
+
+ int __devinit mal_register_commac(struct mal_instance *mal,
+@@ -38,27 +63,49 @@ int __devinit mal_register_commac(struct mal_instance *mal,
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
+-
++      mal_spin_lock_irqsave(&mal->lock, flags);
+       MAL_DBG(mal, "reg(%08x, %08x)" NL,
+               commac->tx_chan_mask, commac->rx_chan_mask);
+
+       /* Don't let multiple commacs claim the same channel(s) */
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (((mal->tx_chan_mask | mal->tx_chan_mask_rtdm) &
++           commac->tx_chan_mask) ||
++          ((mal->rx_chan_mask | mal->rx_chan_mask_rtdm) &
++           commac->rx_chan_mask)) {
++              mal_spin_unlock_irqrestore(&mal->lock, flags);
++              printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
++                     mal->index);
++              return -EBUSY;
++      }
++#else
+       if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
+           (mal->rx_chan_mask & commac->rx_chan_mask)) {
+-              spin_unlock_irqrestore(&mal->lock, flags);
++              mal_spin_unlock_irqrestore(&mal->lock, flags);
+               printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
+                      mal->index);
+               return -EBUSY;
+       }
++#endif
+
+       if (list_empty(&mal->list))
+               napi_enable(&mal->napi);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              mal->tx_chan_mask_rtdm |= commac->tx_chan_mask;
++              mal->rx_chan_mask_rtdm |= commac->rx_chan_mask;
++      } else {
++              mal->tx_chan_mask |= commac->tx_chan_mask;
++              mal->rx_chan_mask |= commac->rx_chan_mask;
++      }
++#else
+       mal->tx_chan_mask |= commac->tx_chan_mask;
+       mal->rx_chan_mask |= commac->rx_chan_mask;
++#endif
+       list_add(&commac->list, &mal->list);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+
+       return 0;
+ }
+@@ -68,18 +115,28 @@ void mal_unregister_commac(struct mal_instance    *mal,
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "unreg(%08x, %08x)" NL,
+               commac->tx_chan_mask, commac->rx_chan_mask);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              mal->tx_chan_mask_rtdm &= ~commac->tx_chan_mask;
++              mal->rx_chan_mask_rtdm &= ~commac->rx_chan_mask;
++      } else {
++              mal->tx_chan_mask &= ~commac->tx_chan_mask;
++              mal->rx_chan_mask &= ~commac->rx_chan_mask;
++      }
++#else
+       mal->tx_chan_mask &= ~commac->tx_chan_mask;
+       mal->rx_chan_mask &= ~commac->rx_chan_mask;
++#endif
+       list_del_init(&commac->list);
+       if (list_empty(&mal->list))
+               napi_disable(&mal->napi);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
+@@ -117,14 +174,14 @@ void mal_enable_tx_channel(struct mal_instance *mal, int channel)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "enable_tx(%d)" NL, channel);
+
+       set_mal_dcrn(mal, MAL_TXCASR,
+                    get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_disable_tx_channel(struct mal_instance *mal, int channel)
+@@ -146,14 +203,14 @@ void mal_enable_rx_channel(struct mal_instance *mal, int channel)
+       if (!(channel % 8))
+               channel >>= 3;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "enable_rx(%d)" NL, channel);
+
+       set_mal_dcrn(mal, MAL_RXCASR,
+                    get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_disable_rx_channel(struct mal_instance *mal, int channel)
+@@ -175,29 +232,36 @@ void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "poll_add(%p)" NL, commac);
+
+       /* starts disabled */
+       set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm)
++              list_add_tail(&commac->poll_list, &mal->poll_list_rtdm);
++      else
++              list_add_tail(&commac->poll_list, &mal->poll_list);
++#else
+       list_add_tail(&commac->poll_list, &mal->poll_list);
++#endif
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
+ {
+       unsigned long flags;
+
+-      spin_lock_irqsave(&mal->lock, flags);
++      mal_spin_lock_irqsave(&mal->lock, flags);
+
+       MAL_DBG(mal, "poll_del(%p)" NL, commac);
+
+       list_del(&commac->poll_list);
+
+-      spin_unlock_irqrestore(&mal->lock, flags);
++      mal_spin_unlock_irqrestore(&mal->lock, flags);
+ }
+
+ /* synchronized by mal_poll() */
+@@ -218,9 +282,18 @@ static inline void mal_disable_eob_irq(struct mal_instance *mal)
+       MAL_DBG2(mal, "disable_irq" NL);
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_serr(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_serr(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
+       u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
+@@ -234,51 +307,99 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
+                       /* We ignore Descriptor error,
+                        * TXDE or RXDE interrupt will be generated anyway.
+                        */
+-                      return IRQ_HANDLED;
++                      return MAL_IRQ_HANDLED;
+               }
+
+               if (esr & MAL_ESR_PEIN) {
+                       /* PLB error, it's probably buggy hardware or
+                        * incorrect physical address in BD (i.e. bug)
+                        */
+-                      if (net_ratelimit())
++                      if (mal_net_ratelimit())
+                               printk(KERN_ERR
+                                      "mal%d: system error, "
+                                      "PLB (ESR = 0x%08x)\n",
+                                      mal->index, esr);
+-                      return IRQ_HANDLED;
++                      return MAL_IRQ_HANDLED;
+               }
+
+               /* OPB error, it's probably buggy hardware or incorrect
+                * EBC setup
+                */
+-              if (net_ratelimit())
++              if (mal_net_ratelimit())
+                       printk(KERN_ERR
+                              "mal%d: system error, OPB (ESR = 0x%08x)\n",
+                              mal->index, esr);
+       }
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++void mal_schedule_poll_nrt(rtdm_nrtsig_t nrt_sig, void* data)
++{
++      struct mal_instance *mal = (struct mal_instance *)data;
++      unsigned long flags;
++
++      local_irq_save(flags);
++      if (likely(napi_schedule_prep(&mal->napi))) {
++              MAL_DBG2(mal, "schedule_poll" NL);
++              __napi_schedule(&mal->napi);
++      } else
++              MAL_DBG2(mal, "already in poll" NL);
++      local_irq_restore(flags);
++}
++#endif
+ static inline void mal_schedule_poll(struct mal_instance *mal)
+ {
+       if (likely(napi_schedule_prep(&mal->napi))) {
+               MAL_DBG2(mal, "schedule_poll" NL);
++#ifndef CONFIG_IBM_NEW_EMAC_MAL_RTDM
+               mal_disable_eob_irq(mal);
++#endif
+               __napi_schedule(&mal->napi);
+       } else
+               MAL_DBG2(mal, "already in poll" NL);
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#ifdef OBSOLETE
++static nanosecs_abs_t tstart;
++#endif
++
++static int mal_txeob(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_txeob(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
++      struct list_head *l;
+       u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
+
+-      MAL_DBG2(mal, "txeob %08x" NL, r);
++      MAL_DBG2(mal, "rt txeob %08x" NL, r);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (r & mal->tx_chan_mask_rtdm) {
++              /* Process TX skbs */
++              list_for_each(l, &mal->poll_list_rtdm) {
++                      struct mal_commac *mc =
++                              list_entry(l, struct mal_commac, poll_list);
++#ifdef OBSOLETE
++                      tstart = rtdm_clock_read();
++#endif
++                      mc->ops->poll_tx(mc->dev);
++              }
++      }
++      if (r & mal->tx_chan_mask)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
++
+       set_mal_dcrn(mal, MAL_TXEOBISR, r);
+
+ #ifdef CONFIG_PPC_DCR_NATIVE
+@@ -287,18 +408,49 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
+                               (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
+ #endif
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_rxeob(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
++      struct list_head *l;
++      u32 r;
+
+-      u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      mal->time_stamp = rtdm_clock_read();
++#endif
++      r = get_mal_dcrn(mal, MAL_RXEOBISR);
+
+       MAL_DBG2(mal, "rxeob %08x" NL, r);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (r & mal->rx_chan_mask_rtdm) {
++              list_for_each(l, &mal->poll_list_rtdm) {
++                      struct mal_commac *mc =
++                              list_entry(l, struct mal_commac, poll_list);
++                      if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED,
++                                            &mc->flags))) {
++                              MAL_DBG(mal, "mc->flags=%#lx\n", mc->flags);
++                              continue;
++                      }
++                      mc->ops->poll_rx(mc->dev, 1024);
++              }
++      }
++      if (r & mal->rx_chan_mask)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
+       set_mal_dcrn(mal, MAL_RXEOBISR, r);
+
+ #ifdef CONFIG_PPC_DCR_NATIVE
+@@ -307,76 +459,149 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
+                               (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
+ #endif
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_txde(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_txde(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+
+       u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
+       set_mal_dcrn(mal, MAL_TXDEIR, deir);
+
+       MAL_DBG(mal, "txde %08x" NL, deir);
+
+-      if (net_ratelimit())
++      if (mal_net_ratelimit())
+               printk(KERN_ERR
+                      "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
+                      mal->index, deir);
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_rxde(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_rxde(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++      int nrtsig_pend = 0;
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+       struct list_head *l;
+
+       u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      mal->time_stamp = rtdm_clock_read();
++#endif
+       MAL_DBG(mal, "rxde %08x" NL, deir);
+
+       list_for_each(l, &mal->list) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++              if (deir & mc->rx_chan_mask) {
++                      set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
++                      mc->ops->rxde(mc->dev);
++                      if (mc->rtdm) {
++                              mc->ops->poll_tx(mc->dev);
++                              mc->ops->poll_rx(mc->dev, 1024);
++                      } else {
++                              nrtsig_pend++;
++                      }
++              }
++#else
+               if (deir & mc->rx_chan_mask) {
+                       set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
+                       mc->ops->rxde(mc->dev);
+               }
++#endif
+       }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (nrtsig_pend)
++              rtdm_nrtsig_pend(&mal->schedule_poll_nrt);
++#else
+       mal_schedule_poll(mal);
++#endif
+       set_mal_dcrn(mal, MAL_RXDEIR, deir);
+
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++static int mal_int(rtdm_irq_t *irq_handle)
++#else
+ static irqreturn_t mal_int(int irq, void *dev_instance)
++#endif
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct mal_instance *mal = rtdm_irq_get_arg(irq_handle,
++                                                  struct mal_instance);
++#else
+       struct mal_instance *mal = dev_instance;
++#endif
+       u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
++      MAL_DBG(mal, "int %08x" NL, esr);
++
+       if (esr & MAL_ESR_EVB) {
+               /* descriptor error */
+               if (esr & MAL_ESR_DE) {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      if (esr & MAL_ESR_CIDT)
++                              return mal_rxde(irq_handle);
++                      else
++                              return mal_txde(irq_handle);
++#else
+                       if (esr & MAL_ESR_CIDT)
+                               return mal_rxde(irq, dev_instance);
+                       else
+                               return mal_txde(irq, dev_instance);
++#endif
+               } else { /* SERR */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      return mal_serr(irq_handle);
++#else
+                       return mal_serr(irq, dev_instance);
++#endif
+               }
+       }
+-      return IRQ_HANDLED;
++      return MAL_IRQ_HANDLED;
+ }
+
+ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
+ {
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (commac->rtdm) {
++              set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
++      } else {
++              while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED,
++                                      &commac->flags))
++                      msleep(1);
++              napi_synchronize(&mal->napi);
++      }
++#else
+       /* Spinlock-type semantics: only one caller disable poll at a time */
+       while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
+               msleep(1);
+
+       /* Synchronize with the MAL NAPI poller */
+       napi_synchronize(&mal->napi);
++#endif
+ }
+
+ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
+@@ -389,7 +614,12 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
+        * probably be delayed until the next interrupt but that's mostly a
+        * non-issue in the context where this is called.
+        */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      if (!commac->rtdm)
++              napi_schedule(&mal->napi);
++#else
+       napi_schedule(&mal->napi);
++#endif
+ }
+
+ static int mal_poll(struct napi_struct *napi, int budget)
+@@ -429,10 +659,15 @@ static int mal_poll(struct napi_struct *napi, int budget)
+       }
+
+       /* We need to disable IRQs to protect from RXDE IRQ here */
+-      spin_lock_irqsave(&mal->lock, flags);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      local_irq_save(flags);
+       __napi_complete(napi);
++      local_irq_restore(flags);
++#else
++      spin_lock_irqsave(&mal->lock, flags);
+       mal_enable_eob_irq(mal);
+       spin_unlock_irqrestore(&mal->lock, flags);
++#endif
+
+       /* Check for "rotting" packet(s) */
+       list_for_each(l, &mal->poll_list) {
+@@ -443,10 +678,15 @@ static int mal_poll(struct napi_struct *napi, int budget)
+               if (unlikely(mc->ops->peek_rx(mc->dev) ||
+                            test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
+                       MAL_DBG2(mal, "rotting packet" NL);
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++                      if (!napi_reschedule(napi))
++                              MAL_DBG2(mal, "already in poll list" NL);
++#else
+                       if (napi_reschedule(napi))
+                               mal_disable_eob_irq(mal);
+                       else
+                               MAL_DBG2(mal, "already in poll list" NL);
++#endif
+
+                       if (budget > 0)
+                               goto again;
+@@ -526,7 +766,11 @@ static int __devinit mal_probe(struct platform_device *ofdev)
+       const u32 *prop;
+       u32 cfg;
+       unsigned long irqflags;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
++#else
+       irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
++#endif
+
+       mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+       if (!mal) {
+@@ -611,7 +855,18 @@ static int __devinit mal_probe(struct platform_device *ofdev)
+
+       INIT_LIST_HEAD(&mal->poll_list);
+       INIT_LIST_HEAD(&mal->list);
+-      spin_lock_init(&mal->lock);
++      mal_spin_lock_init(&mal->lock);
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      INIT_LIST_HEAD(&mal->poll_list_rtdm);
++
++      if (rtdm_nrtsig_init(&mal->schedule_poll_nrt, mal_schedule_poll_nrt,
++                           (void*)mal)) {
++              printk(KERN_ERR
++                     "mal%d: couldn't init mal schedule handler !\n", index);
++              goto fail_unmap;
++      }
++#endif
+
+       init_dummy_netdev(&mal->dummy_dev);
+
+@@ -673,19 +928,44 @@ static int __devinit mal_probe(struct platform_device *ofdev)
+               hdlr_rxde = mal_rxde;
+       }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->serr_irq_handle, mal->serr_irq,
++                             mal_serr, 0, "MAL SERR", mal);
++#else
+       err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
++#endif
+       if (err)
+               goto fail2;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->txde_irq_handle, mal->txde_irq,
++                             mal_txde, 0, "MAL TX DE", mal);
++#else
+       err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
++#endif
+       if (err)
+               goto fail3;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->txeob_irq_handle, mal->txeob_irq,
++                             mal_txeob, 0, "MAL TX EOB", mal);
++#else
+       err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
++#endif
+       if (err)
+               goto fail4;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->rxde_irq_handle, mal->rxde_irq,
++                             mal_rxde, 0, "MAL RX DE", mal);
++#else
+       err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
++#endif
+       if (err)
+               goto fail5;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      err = rtdm_irq_request(&mal->rxeob_irq_handle, mal->rxeob_irq,
++                             mal_rxeob, 0, "MAL RX EOB", mal);
++#else
+       err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
++#endif
+       if (err)
+               goto fail6;
+
+@@ -714,7 +994,11 @@ static int __devinit mal_probe(struct platform_device *ofdev)
+  fail6:
+       free_irq(mal->rxde_irq, mal);
+  fail5:
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_free(&mal->txeob_irq_handle);
++#else
+       free_irq(mal->txeob_irq, mal);
++#endif
+  fail4:
+       free_irq(mal->txde_irq, mal);
+  fail3:
+@@ -807,3 +1091,19 @@ void mal_exit(void)
+ {
+       platform_driver_unregister(&mal_of_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(mal_register_commac);
++EXPORT_SYMBOL_GPL(mal_unregister_commac);
++EXPORT_SYMBOL_GPL(mal_set_rcbs);
++EXPORT_SYMBOL_GPL(mal_tx_bd_offset);
++EXPORT_SYMBOL_GPL(mal_rx_bd_offset);
++EXPORT_SYMBOL_GPL(mal_enable_tx_channel);
++EXPORT_SYMBOL_GPL(mal_disable_tx_channel);
++EXPORT_SYMBOL_GPL(mal_enable_rx_channel);
++EXPORT_SYMBOL_GPL(mal_disable_rx_channel);
++EXPORT_SYMBOL_GPL(mal_poll_add);
++EXPORT_SYMBOL_GPL(mal_poll_del);
++EXPORT_SYMBOL_GPL(mal_poll_enable);
++EXPORT_SYMBOL_GPL(mal_poll_disable);
++#endif
+diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
+index 6608421..0573c76 100644
+--- a/drivers/net/ibm_newemac/mal.h
++++ b/drivers/net/ibm_newemac/mal.h
+@@ -24,6 +24,10 @@
+ #ifndef __IBM_NEWEMAC_MAL_H
+ #define __IBM_NEWEMAC_MAL_H
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++#include <rtdm/rtdm_driver.h>
++#endif
++
+ /*
+  * There are some variations on the MAL, we express them in this driver as
+  * MAL Version 1 and 2 though that doesn't match any IBM terminology.
+@@ -186,6 +190,9 @@ struct mal_commac {
+       u32                     tx_chan_mask;
+       u32                     rx_chan_mask;
+       struct list_head        list;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      int                     rtdm;
++#endif
+ };
+
+ struct mal_instance {
+@@ -199,20 +206,40 @@ struct mal_instance {
+       int                     txde_irq;       /* TX Descriptor Error IRQ */
+       int                     rxde_irq;       /* RX Descriptor Error IRQ */
+       int                     serr_irq;       /* MAL System Error IRQ    */
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_irq_t              txeob_irq_handle;
++      rtdm_irq_t              rxeob_irq_handle;
++      rtdm_irq_t              txde_irq_handle;
++      rtdm_irq_t              rxde_irq_handle;
++      rtdm_irq_t              serr_irq_handle;
++      rtdm_nrtsig_t           schedule_poll_nrt;
++      nanosecs_abs_t          time_stamp;
++#endif
+
+       struct list_head        poll_list;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      struct list_head        poll_list_rtdm;
++#endif
+       struct napi_struct      napi;
+
+       struct list_head        list;
+       u32                     tx_chan_mask;
+       u32                     rx_chan_mask;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      u32                     tx_chan_mask_rtdm;
++      u32                     rx_chan_mask_rtdm;
++#endif
+
+       dma_addr_t              bd_dma;
+       struct mal_descriptor   *bd_virt;
+
+       struct platform_device  *ofdev;
+       int                     index;
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++      rtdm_lock_t             lock;
++#else
+       spinlock_t              lock;
++#endif
+
+       struct net_device       dummy_dev;
+
+diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
+index ac9d964..87a0a80 100644
+--- a/drivers/net/ibm_newemac/phy.c
++++ b/drivers/net/ibm_newemac/phy.c
+@@ -535,4 +535,11 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
+       return 0;
+ }
+
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(emac_mii_phy_probe);
++EXPORT_SYMBOL_GPL(emac_mii_reset_gpcs);
++EXPORT_SYMBOL_GPL(emac_mii_reset_phy);
++#endif
++
++
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
+index 4fa53f3..4097c6b 100644
+--- a/drivers/net/ibm_newemac/rgmii.c
++++ b/drivers/net/ibm_newemac/rgmii.c
+@@ -336,3 +336,11 @@ void rgmii_exit(void)
+ {
+       platform_driver_unregister(&rgmii_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(rgmii_attach);
++EXPORT_SYMBOL_GPL(rgmii_detach);
++EXPORT_SYMBOL_GPL(rgmii_set_speed);
++EXPORT_SYMBOL_GPL(rgmii_get_mdio);
++EXPORT_SYMBOL_GPL(rgmii_put_mdio);
++#endif
+diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
+index 97449e7..4446b1e 100644
+--- a/drivers/net/ibm_newemac/zmii.c
++++ b/drivers/net/ibm_newemac/zmii.c
+@@ -330,3 +330,12 @@ void zmii_exit(void)
+ {
+       platform_driver_unregister(&zmii_driver);
+ }
++
++#ifdef CONFIG_IBM_NEW_EMAC_MAL_RTDM
++EXPORT_SYMBOL_GPL(zmii_attach);
++EXPORT_SYMBOL_GPL(zmii_detach);
++EXPORT_SYMBOL_GPL(zmii_get_mdio);
++EXPORT_SYMBOL_GPL(zmii_put_mdio);
++EXPORT_SYMBOL_GPL(zmii_set_speed);
++#endif
++
+--
+1.7.4.1
-- 
1.7.4.1


------------------------------------------------------------------------------
All the data continuously generated in your IT infrastructure 
contains a definitive record of customers, application performance, 
security threats, fraudulent activity, and more. Splunk takes this 
data and makes sense of it. IT sense. And common sense.
http://p.sf.net/sfu/splunk-novd2d
_______________________________________________
RTnet-developers mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/rtnet-developers

Reply via email to