On 09/01/06, Jan Kiszka <[EMAIL PROTECTED]> wrote:

> My mail client unfortunately refused to inline your patches, so I have to:
>
> > --- intr-cvs.c      2005-12-02 19:56:20.000000000 +0100
> > +++ intr.c  2006-01-09 21:26:44.000000000 +0100
> [...]
> > @@ -204,11 +242,66 @@
> >  int xnintr_attach (xnintr_t *intr,
> >                void *cookie)
> >  {
> > +    rthal_irq_handler_t irq_handler;
> > +    unsigned irq = intr->irq;
> > +    xnshared_irq_t *shirq;
> > +    int err = 0;
> > +    spl_t s;
> > +
> >      intr->hits = 0;
> >      intr->cookie = cookie;
> > -    return xnarch_hook_irq(intr->irq,&xnintr_irq_handler,intr->iack,intr);
> > +
> > +    xnlock_get_irqsave(&nklock,s);
> > +
> > +    irq_handler = rthal_irq_handler(&rthal_domain,irq);
> > +
> > +    if (irq_handler)
> > +   {
> > +   xnintr_t *old;
> > +   shirq = rthal_irq_cookie(&rthal_domain,irq);
> > +   
> > +   if (irq_handler != &xnintr_irq_handler)
> > +       {
> > +       err = -EBUSY;
> > +       goto unlock_and_exit;
> > +       }
> > +   
> > +   old = link2intr(getheadq(&shirq->handlers));
> > +
> > +   if (!(old->flags & intr->flags & XN_ISR_SHARED))
> > +       {
> > +       err = -EBUSY;
> > +       goto unlock_and_exit;
> > +       }
> > +   
> > +   appendq(&shirq->handlers,&intr->link);
> > +   }
> > +    else
> > +   {
> > +   shirq = xnmalloc(sizeof(*shirq));
>
> Why not allocating that piece of memory before taking the global lock?
> If you don't need it, you can drop it again afterward. If you need but
> the returned pointer is NULL, you can still check at the same place you
> do now. Just an idea to avoid complex functions inside the global lock
> for new xeno-code.

Ack. I thought about that actually.
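
Something along these lines, I guess (an untested sketch only:
xnmalloc() moves in front of xnlock_get_irqsave(), and the buffer is
dropped again after the lock is released if it was not consumed, or
if xnarch_hook_irq() failed):

int xnintr_attach (xnintr_t *intr, void *cookie)
{
    rthal_irq_handler_t irq_handler;
    unsigned irq = intr->irq;
    xnshared_irq_t *shirq, *newshirq;
    int err = 0;
    spl_t s;

    intr->hits = 0;
    intr->cookie = cookie;

    /* Speculative allocation, done before taking the global lock. */
    newshirq = xnmalloc(sizeof(*newshirq));

    xnlock_get_irqsave(&nklock,s);

    irq_handler = rthal_irq_handler(&rthal_domain,irq);

    if (irq_handler)
	{
	xnintr_t *old;

	if (irq_handler != &xnintr_irq_handler)
	    {
	    err = -EBUSY;
	    goto unlock_and_exit;
	    }

	shirq = rthal_irq_cookie(&rthal_domain,irq);
	old = link2intr(getheadq(&shirq->handlers));

	if (!(old->flags & intr->flags & XN_ISR_SHARED))
	    {
	    err = -EBUSY;
	    goto unlock_and_exit;
	    }

	appendq(&shirq->handlers,&intr->link);
	}
    else
	{
	if (!newshirq)
	    {
	    err = -ENOMEM;
	    goto unlock_and_exit;
	    }

	shirq = newshirq;
	newshirq = NULL;	/* Consumed; don't drop it below. */

	initq(&shirq->handlers);
	appendq(&shirq->handlers,&intr->link);

	err = xnarch_hook_irq(irq,&xnintr_irq_handler,intr->iack,shirq);

	if (err)
	    newshirq = shirq;	/* Hook failed; drop it below. */
	}

unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    if (newshirq)
	xnfree(newshirq);	/* Unused (or failed) allocation. */

    return err;
}

That way, nothing heavier than the list manipulation and
xnarch_hook_irq() runs under nklock, and the occasionally wasted
allocation in the already-shared case is harmless since
xnintr_attach() is not time-critical anyway.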

>
> > +   
> > +   if (!shirq)
> > +       {
> > +       err = -ENOMEM;
> > +       goto unlock_and_exit;
> > +       }
> > +
> > +   initq(&shirq->handlers);
> > +   appendq(&shirq->handlers,&intr->link);
> > +
> > +   err = xnarch_hook_irq(irq,&xnintr_irq_handler,intr->iack,shirq);
> > +   
> > +   if (err)
> > +       xnfree(shirq);
> > +   }
> > +
> > +unlock_and_exit:
> > +
> > +    xnlock_put_irqrestore(&nklock,s);
> > +    return err;
> >  }
> >
> > +
> >  /*!
> >   * \fn int xnintr_detach (xnintr_t *intr)
> >   * \brief Detach an interrupt object.
> > @@ -241,7 +334,32 @@
> >  int xnintr_detach (xnintr_t *intr)
> >
> >  {
> > -    return xnarch_release_irq(intr->irq);
> > +    unsigned irq = intr->irq;
> > +    xnshared_irq_t *shirq;
> > +    int cleanup = 0;
> > +    int err = 0;
> > +    spl_t s;
> > +
> > +    xnlock_get_irqsave(&nklock,s);
> > +
> > +    shirq = rthal_irq_cookie(&rthal_domain,irq);
> > +
> > +    removeq(&shirq->handlers,&intr->link);
> > +
> > +    if (!countq(&shirq->handlers))
> > +   {
> > +   err = xnarch_release_irq(irq);
> > +   cleanup = 1;
> > +   }
> > +
> > +    xnlock_put_irqrestore(&nklock,s);
> > +
> > +    xnintr_shirq_spin(shirq);
> > +
> > +    if (cleanup)
> > +   xnfree(shirq);
> > +
> > +    return err;
> >  }
> >
> >  /*!
> > @@ -350,17 +468,45 @@
>
> I guess here starts the new IRQ handler. BTW, diff -p helps a lot when
> reading patches as a human being and not as a patch tool. ;)
>
> >
> >  {
> >      xnsched_t *sched = xnpod_current_sched();
> > -    xnintr_t *intr = (xnintr_t *)cookie;
> > -    int s;
> > +    xnshared_irq_t *shirq = (xnshared_irq_t *)cookie;
> > +    xnholder_t *holder;
> > +    spl_t flags;
> > +    int s = 0;
> >
> >      xnarch_memory_barrier();
> >
> >      xnltt_log_event(xeno_ev_ienter,irq);
> >
> > +    xnlock_get_irqsave(&nklock,flags);
> > +
> > +    shirq = rthal_irq_cookie(&rthal_domain,irq);
> > +
> > +    if (shirq)
> > +   xnintr_shirq_lock(shirq);
> > +
> > +    xnlock_put_irqrestore(&nklock,flags);
>
> Why "_irqsave" in IRQ context?
>
> Regarding this locking issue in general, I first had some RCU mechanism
> in mind to avoid the reader-side lock in the IRQ handler. But as IRQs
> typically touch some nucleus service with its own nklock-acquire anyway,
> optimising this here does not seem to be worth the effort now. As long
> as we have the global lock, it's ok and much simpler I think.

As Gilles has pointed out, nklock is dangerous here, so let's set it
aside for now.
Actually, the way the shirq->handlers list is used still amounts to a
kind of RCU, I guess.

Look, xnintr_irq_handler() doesn't hold the lock while iterating
through the list.

That's why xnintr_shirq_spin() is there; I tried to explain the idea
in my previous reply to Gilles's mail.

Actually, I still have some doubts about this approach, in particular
whether everything is OK atomicity-wise.

I don't have my own computer at hand, and it seems I'm occupying my
friend's machine at the moment :) I'll post more details later if
need be.
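
To make it a bit more concrete, this is roughly the scheme I have in
mind (an untested sketch: the atomic_counter_t / xnarch_atomic_*
names are my assumption of what the arch layer provides, and the
barrier placement is exactly the kind of detail I'm still in doubt
about):

/* The IRQ handler marks the descriptor busy while it walks
 * shirq->handlers; xnintr_detach() spins until every CPU has left
 * that loop before it may xnfree() the descriptor. */
typedef struct xnshared_irq {

    xnqueue_t handlers;		/* attached xnintr_t objects */
    atomic_counter_t active;	/* # of CPUs inside the handler loop */

} xnshared_irq_t;

static inline void xnintr_shirq_lock (xnshared_irq_t *shirq)
{
    xnarch_atomic_inc(&shirq->active);
}

static inline void xnintr_shirq_unlock (xnshared_irq_t *shirq)
{
    xnarch_memory_barrier();
    xnarch_atomic_dec(&shirq->active);
}

static inline void xnintr_shirq_spin (xnshared_irq_t *shirq)
{
    /* Busy-wait until no in-flight handler invocation references
       the list anymore. */
    while (xnarch_atomic_get(&shirq->active) != 0)
	cpu_relax();
}

The reader side (the IRQ handler) never blocks; only the writer
spins, and it does so in xnintr_detach(), which is not time-critical.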

>
> > +
> > +    if (!shirq)
> > +   {
> > +   xnltt_log_event(xeno_ev_iexit,irq);
> > +   return;
> > +   }
> > +
> >      ++sched->inesting;
> > -    s = intr->isr(intr);
> > +
> > +    holder = getheadq(&shirq->handlers);
> > +
> > +    while (holder)
> > +   {
> > +   xnintr_t *intr = link2intr(holder);
> > +   holder = nextq(&shirq->handlers,holder);
> > +
> > +   s |= intr->isr(intr);
> > +   ++intr->hits;
> > +   }
> > +
> > +    xnintr_shirq_unlock(shirq);
> > +
> >      --sched->inesting;
> > -    ++intr->hits;
> >
> >      if (s & XN_ISR_ENABLE)
> >     xnarch_enable_irq(irq);
>
> Looking forward to seeing this in Xenomai - and to letting some real
> tests run with it. :)

So am I. Heh.. :)

>
> Jan
>
>


--
Best regards,
Dmitry Adamushko
