Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Michael-Luke Jones

On 8 May 2007, at 01:46, Krzysztof Halasa wrote:


Adds a driver for the built-in IXP4xx hardware Queue Manager.

Signed-off-by: Krzysztof Halasa <[EMAIL PROTECTED]>


[snip]

diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c

new file mode 100644
index 000..b9e9bd6
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c


Already in mach-ixp4xx, so can just be called qmgr.c


@@ -0,0 +1,273 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <[EMAIL PROTECTED]>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/arch/qmgr.h>
+
+#define DEBUG  0
+
+struct qmgr_regs __iomem *qmgr_regs;
+static struct resource *mem_res;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[HALF_QUEUES])(void *pdev);
+static void *irq_pdevs[HALF_QUEUES];
+
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+	u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
+   int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+   unsigned long flags;
+
+   src &= 7;
+   spin_lock_irqsave(&qmgr_lock, flags);
+   __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
+   irq_handlers[queue] = handler;
+   irq_pdevs[queue] = pdev;
+   spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1(int irq, void *pdev)
+{
+   int i;
+   u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
+   __raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */
+
+   for (i = 0; i < HALF_QUEUES; i++)
+   if (val & (1 << i))
+   irq_handlers[i](irq_pdevs[i]);
+
+   return val ? IRQ_HANDLED : IRQ_NONE;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&qmgr_lock, flags);
+   __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
+&qmgr_regs->irqen[0]);
+   spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&qmgr_lock, flags);
+   __raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
+&qmgr_regs->irqen[0]);
+   spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+   mask[3] = mask[3] << 1 | mask[2] >> 31;
+   mask[2] = mask[2] << 1 | mask[1] >> 31;
+   mask[1] = mask[1] << 1 | mask[0] >> 31;
+   mask[0] <<= 1;
+}
+
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+  unsigned int nearly_empty_watermark,
+  unsigned int nearly_full_watermark)
+{
+   u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+   int err;
+
+   if (queue >= HALF_QUEUES)
+   return -ERANGE;
+
+   if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+   return -EINVAL;
+
+   switch (len) {
+   case  16:
+   cfg = 0 << 24;
+   mask[0] = 0x1;
+   break;
+   case  32:
+   cfg = 1 << 24;
+   mask[0] = 0x3;
+   break;
+   case  64:
+   cfg = 2 << 24;
+   mask[0] = 0xF;
+   break;
+   case 128:
+   cfg = 3 << 24;
+   mask[0] = 0xFF;
+   break;
+   default:
+   return -EINVAL;
+   }
+
+   cfg |= nearly_empty_watermark << 26;
+   cfg |= nearly_full_watermark << 29;
+   len /= 16;  /* in 16-dwords: 1, 2, 4 or 8 */
+   mask[1] = mask[2] = mask[3] = 0;
+
+   if (!try_module_get(THIS_MODULE))
+   return -ENODEV;
+
+   spin_lock_irq(&qmgr_lock);
+   if (__raw_readl(&qmgr_regs->sram[queue])) {
+   err = -EBUSY;
+   goto err;
+   }
+
+   while (1) {
+   if (!(used_sram_bitmap[0] & mask[0]) &&
+   !(used_sram_bitmap[1] & mask[1]) &&
+   !(used_sram_bitmap[2] & mask[2]) &&
+   !(used_sram_bitmap[3] & mask[3]))
+   break; /* found free space */
+
+   addr++;
+   shift_mask(mask);
+   if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+   printk(KERN_ERR "qmgr: no free SRAM space for"
+  " queue %i\n", queue);
+   err = -ENOMEM;
+   goto err;
+   }
+   }
+
+   used_sram_bitmap[0] |= mask[0];
+   used_sram_bitmap[1] |= mask[1];
+   used_sram_bitmap[2] |= mask[2];
+   used_sram_bitmap[3] |= mask[3];
+   __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+   spin_unlock_irq(&qmgr_lock);
+
+#if DEBUG
+   printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
+  queue, addr);
+#endif
+   return 0;
+
+err:
+   spin_unlock_irq(&qmgr_lock);
+   module_put(THIS_MODULE);
+   return err;
+}

Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Lennert Buytenhek
I'm not sure what the latest versions are, so I'm not sure which
patches to review and which patches are obsolete.


On Tue, May 08, 2007 at 02:46:28AM +0200, Krzysztof Halasa wrote:

> +struct qmgr_regs __iomem *qmgr_regs;
> +static struct resource *mem_res;
> +static spinlock_t qmgr_lock;
> +static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
> +static void (*irq_handlers[HALF_QUEUES])(void *pdev);
> +static void *irq_pdevs[HALF_QUEUES];
> +
> +void qmgr_set_irq(unsigned int queue, int src,
> +   void (*handler)(void *pdev), void *pdev)
> +{
> + u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
> + int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
> + unsigned long flags;
> +
> + src &= 7;
> + spin_lock_irqsave(&qmgr_lock, flags);
> + __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
> + irq_handlers[queue] = handler;
> + irq_pdevs[queue] = pdev;
> + spin_unlock_irqrestore(&qmgr_lock, flags);
> +}

The queue manager interrupts should probably be implemented as an
irqchip, in the same way that GPIO interrupts are implemented.  (I.e.
allocate 'real' interrupt numbers for them, and use the interrupt
cascade mechanism.)  You probably want to have separate irqchips for
the upper and lower halves, too.  This way, drivers can just use
request_irq() instead of having to bother with platform-specific
qmgr_set_irq() methods.  I think I also made this review comment
with Christian's driver.
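
For illustration, the consumer side then becomes a plain request_irq()
call (a sketch only; IRQ_IXP4XX_QUEUE_3 and eth_rx_irq are made-up
names, not from the patch):

static irqreturn_t eth_rx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	/* drain the RX queue, schedule NAPI, etc. */
	return IRQ_HANDLED;
}

	err = request_irq(IRQ_IXP4XX_QUEUE_3, eth_rx_irq, 0,
			  dev->name, dev);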


> +int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
> +unsigned int nearly_empty_watermark,
> +unsigned int nearly_full_watermark)
> +{
> + u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
> + int err;
> +
> + if (queue >= HALF_QUEUES)
> + return -ERANGE;
> +
> + if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
> + return -EINVAL;
> +
> + switch (len) {
> + case  16:
> + cfg = 0 << 24;
> + mask[0] = 0x1;
> + break;
> + case  32:
> + cfg = 1 << 24;
> + mask[0] = 0x3;
> + break;
> + case  64:
> + cfg = 2 << 24;
> + mask[0] = 0xF;
> + break;
> + case 128:
> + cfg = 3 << 24;
> + mask[0] = 0xFF;
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + cfg |= nearly_empty_watermark << 26;
> + cfg |= nearly_full_watermark << 29;
> + len /= 16;  /* in 16-dwords: 1, 2, 4 or 8 */
> + mask[1] = mask[2] = mask[3] = 0;
> +
> + if (!try_module_get(THIS_MODULE))
> + return -ENODEV;
> +
> + spin_lock_irq(&qmgr_lock);
> + if (__raw_readl(&qmgr_regs->sram[queue])) {
> + err = -EBUSY;
> + goto err;
> + }
> +
> + while (1) {
> + if (!(used_sram_bitmap[0] & mask[0]) &&
> + !(used_sram_bitmap[1] & mask[1]) &&
> + !(used_sram_bitmap[2] & mask[2]) &&
> + !(used_sram_bitmap[3] & mask[3]))
> + break; /* found free space */
> +
> + addr++;
> + shift_mask(mask);
> + if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
> + printk(KERN_ERR "qmgr: no free SRAM space for"
> +" queue %i\n", queue);
> + err = -ENOMEM;
> + goto err;
> + }
> + }
> +
> + used_sram_bitmap[0] |= mask[0];
> + used_sram_bitmap[1] |= mask[1];
> + used_sram_bitmap[2] |= mask[2];
> + used_sram_bitmap[3] |= mask[3];
> + __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
> + spin_unlock_irq(&qmgr_lock);
> +
> +#if DEBUG
> + printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
> +queue, addr);
> +#endif
> + return 0;
> +
> +err:
> + spin_unlock_irq(&qmgr_lock);
> + module_put(THIS_MODULE);
> + return err;
> +}

As with Christian's driver, I don't know whether an SRAM allocator
makes much sense.  We can just set up a static allocation map for the
in-tree drivers and leave out the allocator altogether.  I.e. I don't
think it's worth the complexity (and just because the butt-ugly Intel
code has an allocator isn't a very good reason. :-)

I.e. an API a la:

ixp4xx_qmgr_config_queue(int queue_nr, int sram_base_address, int queue_size, ...);

might simply suffice.
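
For illustration, a compile-time map could then be as simple as this
(hypothetical names; addresses in 16-dword pages, so a 128-dword queue
occupies 8 pages):

#define QMGR_SRAM_ETH0_RX	0x00
#define QMGR_SRAM_ETH0_TX	0x08
#define QMGR_SRAM_HSS0_RX	0x10
/* ... */

	ixp4xx_qmgr_config_queue(3, QMGR_SRAM_ETH0_RX, 128, ...);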


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Alexey Zaytsev

On 5/8/07, Lennert Buytenhek <[EMAIL PROTECTED]> wrote:
...

As with Christian's driver, I don't know whether an SRAM allocator
makes much sense.  We can just set up a static allocation map for the
in-tree drivers and leave out the allocator altogether.  I.e. I don't
think it's worth the complexity (and just because the butt-ugly Intel
code has an allocator isn't a very good reason. :-)


Is the qmgr used when the NPEs are utilized as DMA engines? And is the
allocator needed in this case? If yes, I beg you not to drop it,
because we use one NPE for this purpose, and if we are going to adopt
this driver instead of Intel's one, you will receive a patch
adding DMA functionality very soon. ;)


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Lennert Buytenhek
On Tue, May 08, 2007 at 04:47:31PM +0400, Alexey Zaytsev wrote:

> > As with Christian's driver, I don't know whether an SRAM allocator
> > makes much sense.  We can just set up a static allocation map for the
> > in-tree drivers and leave out the allocator altogether.  I.e. I don't
> > think it's worth the complexity (and just because the butt-ugly Intel
> > code has an allocator isn't a very good reason. :-)
> 
> Is the qmgr used when the NPEs are utilized as DMA engines?

I'm not sure, but probably yes.


> And is the allocator needed in this case?

If you statically partition the available queue SRAM, no.


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Krzysztof Halasa
Michael-Luke Jones <[EMAIL PROTECTED]> writes:

> Already in mach-ixp4xx, so can just be called qmgr.c

Same here.

>> +#define QUEUE_IRQ_SRC_NEARLY_FULL   2
>> +#define QUEUE_IRQ_SRC_FULL  3
>> +#define QUEUE_IRQ_SRC_NOT_EMPTY 4
>> +#define QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY  5
>> +#define QUEUE_IRQ_SRC_NOT_NEARLY_FULL   6
>> +#define QUEUE_IRQ_SRC_NOT_FULL  7
>
> Here, unlike ixp4xx_npe.c defines are in qmgr.h - that seems a bit
> more natural.

Because they are public interface :-)
-- 
Krzysztof Halasa


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Krzysztof Halasa
Lennert Buytenhek <[EMAIL PROTECTED]> writes:

> The queue manager interrupts should probably be implemented as an
> irqchip, in the same way that GPIO interrupts are implemented.  (I.e.
> allocate 'real' interrupt numbers for them, and use the interrupt
> cascade mechanism.)  You probably want to have separate irqchips for
> the upper and lower halves, too.  This way, drivers can just use
> request_irq() instead of having to bother with platform-specific
> qmgr_set_irq() methods.

Is there a sample somewhere?

> As with Christian's driver, I don't know whether an SRAM allocator
> makes much sense.  We can just set up a static allocation map for the
> in-tree drivers and leave out the allocator altogether.  I.e. I don't
> think it's worth the complexity (and just because the butt-ugly Intel
> code has an allocator isn't a very good reason. :-)

It's a very simple allocator. I don't think we have enough SRAM
without it. For now it would work, but the SRAM is probably too small
for all potential users at the same time.

There may be up to 6 Ethernet ports (not sure about hardware
status, not yet supported even by Intel) - 7 queues * 128 entries
each = ~ 3.5 KB. Add 2 long queues (RX) for HSS and something
for TX, and then crypto, and maybe other things.

The current allocator has its potential problems, but they can be
solved internally (fragmentation; but we tend to use only
128-entry queues (RX and TX-ready Ethernet pool) and short,
16-entry ones (TX), which are easy to deal with).
-- 
Krzysztof Halasa


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Lennert Buytenhek
On Tue, May 08, 2007 at 04:12:17PM +0200, Krzysztof Halasa wrote:

> > The queue manager interrupts should probably be implemented as an
> > irqchip, in the same way that GPIO interrupts are implemented.  (I.e.
> > allocate 'real' interrupt numbers for them, and use the interrupt
> > cascade mechanism.)  You probably want to have separate irqchips for
> > the upper and lower halves, too.  This way, drivers can just use
> > request_irq() instead of having to bother with platform-specific
> > qmgr_set_irq() methods.
> 
> Is there a sample somewhere?

See for example arch/arm/mach-ep93xx/core.c, handling of the A/B/F
port GPIO interrupts.

In a nutshell, it goes like this.

1) Allocate a set of IRQ numbers.  E.g. in include/asm-arm/arch-ixp4xx/irqs.h:

#define IRQ_IXP4XX_QUEUE_0  64
#define IRQ_IXP4XX_QUEUE_1  65
[...]

   Adjust NR_IRQS, too.

2) Implement interrupt chip functions:

static void ixp4xx_queue_low_irq_mask_ack(unsigned int irq)
{
[...]
}

static void ixp4xx_queue_low_irq_mask(unsigned int irq)
{
[...]
}

static void ixp4xx_queue_low_irq_unmask(unsigned int irq)
{
[...]
}

static int ixp4xx_queue_low_irq_set_type(unsigned int irq, unsigned int type)
{
[...]
}

static struct irq_chip ixp4xx_queue_low_irq_chip = {
	.name		= "QMGR low",
	.ack		= ixp4xx_queue_low_irq_mask_ack,
	.mask		= ixp4xx_queue_low_irq_mask,
	.unmask		= ixp4xx_queue_low_irq_unmask,
	.set_type	= ixp4xx_queue_low_irq_set_type,
};

3) Hook up the queue interrupts:

	for (i = IRQ_IXP4XX_QUEUE_0; i <= IRQ_IXP4XX_QUEUE_31; i++) {
		set_irq_chip(i, &ixp4xx_queue_low_irq_chip);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}

4) Implement an interrupt handler for the parent interrupt:

static void ixp4xx_qmgr_low_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	u32 status;
	int i;

	status = __raw_readl(IXP4XX_WHATEVER_QMGR_LOW_STATUS_REGISTER);
	for (i = 0; i < 32; i++) {
		if (status & (1 << i)) {
			desc = irq_desc + IRQ_IXP4XX_QUEUE_0 + i;
			desc_handle_irq(IRQ_IXP4XX_QUEUE_0 + i, desc);
		}
	}
}

5) Hook up the parent interrupt:

set_irq_chained_handler(IRQ_IXP4XX_QM1, ixp4xx_qmgr_low_irq_handler);


Or something like that.
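
To make step 2 concrete: the mask/unmask bodies would presumably just
mirror qmgr_enable_irq()/qmgr_disable_irq() from the patch. An untested
sketch, reusing the patch's qmgr_regs:

static void ixp4xx_queue_low_irq_mask(unsigned int irq)
{
	int queue = irq - IRQ_IXP4XX_QUEUE_0;

	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
		     &qmgr_regs->irqen[0]);
}

static void ixp4xx_queue_low_irq_unmask(unsigned int irq)
{
	int queue = irq - IRQ_IXP4XX_QUEUE_0;

	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
		     &qmgr_regs->irqen[0]);
}

(No extra spinlock should be needed there; the genirq core holds the
descriptor lock around these calls.)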


> > As with Christian's driver, I don't know whether an SRAM allocator
> > makes much sense.  We can just set up a static allocation map for the
> > in-tree drivers and leave out the allocator altogether.  I.e. I don't
> > think it's worth the complexity (and just because the butt-ugly Intel
> > code has an allocator isn't a very good reason. :-)
> 
> It's a very simple allocator. I don't think we have enough SRAM
> without it. For now it would work, but the SRAM is probably too small
> for all potential users at the same time.
> 
> There may be up to 6 Ethernet ports (not sure about hardware
> status, not yet supported even by Intel) - 7 queues * 128 entries
> each = ~ 3.5 KB. Add 2 long queues (RX) for HSS and something
> for TX, and then crypto, and maybe other things.

You're unlikely to be using all of those at the same time, though.

And what do you do if the user does compile all of these features into
his kernel and then tries to use them all at the same time?  Return
-ENOMEM?

Shouldn't we make sure that at least the features that are compiled in
can be used at the same time?  If you want that guarantee, then you
might as well determine the SRAM map at compile time.


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-08 Thread Krzysztof Halasa
Lennert Buytenhek <[EMAIL PROTECTED]> writes:

> See for example arch/arm/mach-ep93xx/core.c, handling of the A/B/F
> port GPIO interrupts.
>
> In a nutshell, it goes like this.

Thanks, I will investigate.

>> There may be up to 6 Ethernet ports (not sure about hardware
>> status, not yet supported even by Intel) - 7 queues * 128 entries
>> each = ~ 3.5 KB. Add 2 long queues (RX) for HSS and something
>> for TX, and then crypto, and maybe other things.
>
> You're unlikely to be using all of those at the same time, though.

That's the point.

> And what do you do if the user does compile all of these features into
> his kernel and then tries to use them all at the same time?  Return
> -ENOMEM?

If he is able to do so, yes - there is nothing we can do. But
I suspect a single machine would not have all possible hardware.
The problem is, we don't know what it would have, so it must be
dynamic.

> Shouldn't we make sure that at least the features that are compiled in
> can be used at the same time?

We can't - hardware capabilities limit that. A general-purpose
distribution would probably want to compile in everything (perhaps
as modules).

>  If you want that guarantee, then you
> might as well determine the SRAM map at compile time.

That would be most limiting with IMHO no visible advantage.
-- 
Krzysztof Halasa


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-09 Thread Lennert Buytenhek
On Tue, May 08, 2007 at 06:59:36PM +0200, Krzysztof Halasa wrote:

> >> There may be up to 6 Ethernet ports (not sure about hardware
> >> status, not yet supported even by Intel) - 7 queues * 128 entries
> >> each = ~ 3.5 KB. Add 2 long queues (RX) for HSS and something
> >> for TX, and then crypto, and maybe other things.
> >
> > You're unlikely to be using all of those at the same time, though.
> 
> That's the point.
> 
> > And what do you do if the user does compile all of these features into
> > his kernel and then tries to use them all at the same time?  Return
> > -ENOMEM?
> 
> If he is able to do so, yes - there is nothing we can do. But
> I suspect a single machine would not have all possible hardware.
> The problem is, we don't know what would it have, so it must be
> dynamic.

Well, you _would_ like to have a way to make sure that all the
capabilities on the board can be used.  If you have a future ixp4xx
based board with 16 ethernet ports, you don't want 'ifconfig eth7 up'
to give you -ENOMEM just because we ran out of SRAM.

The way I see it, that means that you do want to scale back your
other SRAM allocations if you know that you're going to need a lot
of SRAM (say, for ethernet RX/TX queues.)

Either you can do this with an ugly hack a la:

/*
 * The FOO board has many ethernet ports, and runs out of
 * SRAM prematurely if we use the default TX/RX ring sizes.
 */
#ifdef CONFIG_MACH_IXP483_FOO_BOARD
#define IXP4XX_ETH_RXTX_QUEUE_SIZE  32
#else
#define IXP4XX_ETH_RXTX_QUEUE_SIZE  256
#endif

Or you can put this knowledge in the board support code (cleaner, IMHO.)

E.g. let arch/arm/mach-ixp4xx/nslu2.c decide, at platform device
instantiation time, which region of queue SRAM can be used by which
queue, and take static allocations for things like the crypto unit
into account.  (This is just one form of that idea, there are many
different variations.)
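
For illustration, the platform data could carry the layout directly
(hypothetical struct and field names, just to show the shape of the
idea):

struct eth_plat_info {
	u8	rxq;		/* RX queue number */
	u8	rxq_entries;	/* board file picks e.g. 32 vs 256 here */
	u8	rxq_sram_addr;	/* in 16-dword pages */
	/* ... */
};

The driver then never picks an SRAM address itself; the board code
owns the map.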

That way, you can _guarantee_ that you'll always have enough SRAM
to be able to use the functionality that is exposed on the board you
are running on (which is a desirable property, IMHO), which is
something that you can't achieve with an allocator, as far as I can
see.

I'm not per se against the allocator, I just think that there are
problems (running out of SRAM, fragmentation) that can't be solved
by the allocator alone (SRAM users have to be aware which other SRAM
users there are in the system, while the idea of the allocator is to
insulate these users from each other), and any solution that solves
those two problems IMHO also automatically solves the problem that
the allocator is trying to solve.


Re: [PATCH] Intel IXP4xx network drivers v.3 - QMGR

2007-05-10 Thread Krzysztof Halasa
Lennert Buytenhek <[EMAIL PROTECTED]> writes:

> The way I see it, that means that you do want to scale back your
> other SRAM allocations if you know that you're going to need a lot
> of SRAM (say, for ethernet RX/TX queues.)

Yep, I will then add "queue_size" parameter to the platform data.
Or something like that.

> Or you can put this knowledge in the board support code (cleaner, IMHO.)

Sure.

> That way, you can _guarantee_ that you'll always have enough SRAM
> to be able to use the functionality that is exposed on the board you
> are running on (which is a desirable property, IMHO), which is
> something that you can't achieve with an allocator, as far as I can
> see.

I'd have to put SRAM addresses in the board code instead. Certainly
not required at this point, and perhaps it will never be needed.
-- 
Krzysztof Halasa