On 15/12/20(Tue) 17:23, Visa Hankala wrote:
> On Tue, Dec 15, 2020 at 07:46:01AM -0300, Martin Pieuchot wrote:
> > @@ -636,43 +651,59 @@ dopselect(struct proc *p, int nd, fd_set
> >     if (sigmask)
> >             dosigsuspend(p, *sigmask &~ sigcantmask);
> >  
> > -retry:
> > -   ncoll = nselcoll;
> > -   atomic_setbits_int(&p->p_flag, P_SELECT);
> > -   error = selscan(p, pibits[0], pobits[0], nd, ni, retval);
> > -   if (error || *retval)
> > +   /* Register kqueue events */
> > +   if ((error = pselregister(p, pibits[0], nd, ni, &nevents) != 0))
> >             goto done;
> 
> The above has parentheses wrong and returns 1 (EPERM) as error.
> The lines should read:
> 
>       if ((error = pselregister(p, pibits[0], nd, ni, &nevents)) != 0)
>               goto done;

Thanks, fixed.
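
For the archives, the issue was operator precedence: `!=' binds tighter
than `=', so with the misplaced closing parenthesis error ended up
holding the result of the comparison (0 or 1, hence EPERM) instead of
the errno returned by pselregister():

	/* broken: the comparison is evaluated first, error gets 0 or 1 */
	if ((error = pselregister(p, pibits[0], nd, ni, &nevents) != 0))
		goto done;

	/* fixed: assign the errno first, then compare it against 0 */
	if ((error = pselregister(p, pibits[0], nd, ni, &nevents)) != 0)
		goto done;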
 
> In addition to the above, I noticed that select(2) behaves differently
> than before when a file descriptor that is being monitored is closed by
> another thread. The old implementation returns EBADF. The new code keeps
> on waiting on the underlying object.
> 
> The diff below makes kqueue clear kqpoll's fd event registration on
> fd close. However, it does not make select(2) return an error, the fd
> just will not cause a wakeup any longer. I think I have an idea on how
> to correct that but I need to consider it some more.

Are you saying that knote_fdclose() can't clean up the knotes on the
per-thread kqueue?

If so, should we replace the call to kqueue_free() in kqpoll_exit() with
a KQRELE()?  The original intent was to not put the per-thread kqueue on
the fdp list.  As you just pointed out, this is necessary after all.  So
I don't see any need for kqueue_free().  We could even assert that the
refcount is 1.
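
Rough sketch of why the fd_kqlist insertion matters (from memory, not
verbatim kern_event.c): knote_fdclose() only walks the kqueues hanging
off fdp->fd_kqlist, so a kqueue that was never inserted there never has
its knotes for a closing fd detached, roughly:

	/* simplified sketch, not the exact code */
	LIST_FOREACH(kq, &fdp->fd_kqlist, kq_next) {
		if (fd < kq->kq_knlistsize)
			knote_remove(p, &kq->kq_knlist[fd]);
	}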

Diff below does that, feel free to commit it.

Index: kern/kern_event.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_event.c,v
retrieving revision 1.148
diff -u -p -r1.148 kern_event.c
--- kern/kern_event.c   15 Dec 2020 04:48:18 -0000      1.148
+++ kern/kern_event.c   16 Dec 2020 11:19:01 -0000
@@ -168,12 +168,11 @@ KQREF(struct kqueue *kq)
 void
 KQRELE(struct kqueue *kq)
 {
-       struct filedesc *fdp;
+       struct filedesc *fdp = kq->kq_fdp;
 
        if (atomic_dec_int_nv(&kq->kq_refs) > 0)
                return;
 
-       fdp = kq->kq_fdp;
        if (rw_status(&fdp->fd_lock) == RW_WRITE) {
                LIST_REMOVE(kq, kq_next);
        } else {
@@ -182,12 +181,6 @@ KQRELE(struct kqueue *kq)
                fdpunlock(fdp);
        }
 
-       kqueue_free(kq);
-}
-
-void
-kqueue_free(struct kqueue *kq)
-{
        free(kq->kq_knlist, M_KEVENT, kq->kq_knlistsize *
            sizeof(struct knlist));
        hashfree(kq->kq_knhash, KN_HASHSIZE, M_KEVENT);
@@ -509,12 +502,17 @@ void
 kqpoll_init(void)
 {
        struct proc *p = curproc;
+       struct filedesc *fdp;
 
        if (p->p_kq != NULL)
                return;
 
        p->p_kq = kqueue_alloc(p->p_fd);
        p->p_kq_serial = arc4random();
+       fdp = p->p_fd;
+       fdplock(fdp);
+       LIST_INSERT_HEAD(&fdp->fd_kqlist, p->p_kq, kq_next);
+       fdpunlock(fdp);
 }
 
 void
@@ -526,7 +524,8 @@ kqpoll_exit(void)
                return;
 
        kqueue_terminate(p, p->p_kq);
-       kqueue_free(p->p_kq);
+       KASSERT(p->p_kq->kq_refs == 1);
+       KQRELE(p->p_kq);
        p->p_kq = NULL;
 }
 
