On Wed, Nov 23, 2022 at 03:39:54PM +0300, Vitaliy Makkoveev wrote:
> On Wed, Nov 23, 2022 at 08:46:41AM +0000, Klemens Nanni wrote:
> > Mechanical move that "unlocks" the errno(2) cases.
> > 
> > This is another step towards more read-only interface ioctls running
> > with the shared net lock alone.
> >     
> > Feedback? OK?
> > 
> 
> Could this be merged with the following non-"Mechanical move" diff?

Here is this commit squashed together with the two that would follow:

---
  Push kernel lock into in6_ioctl()

  Mechanical move that "unlocks" the errno(2) cases.

  This is another step towards more read-only interface ioctls running
  with the shared net lock alone.
===
  Push kernel lock into in6_ioctl_get()

  Purely mechanical except for the early function-local sockaddr dance.
===
  Push kernel lock into nd6_ioctl(), unlock SIOCGIFINFO_IN6

  Neighbour Discovery information is protected by the net lock, as
  documented in nd6.h struct nd_ifinfo, so drop the kernel lock to read.

  ndp(8) is the only SIOCGIFINFO_IN6 user in base.
---
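
For reviewers skimming the squashed diff: the pattern all three commits
apply looks like the sketch below.  EXAMPLE_GET_* and example_read() are
made-up names, not tree code.  The errno(2) cases return before any lock
is taken, audited read-only paths run with the shared net lock alone, and
the not-yet-audited paths take the kernel lock locally, so each remaining
case can be unlocked on its own later.

int
example_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
{
	int error = 0;

	if (ifp == NULL)
		return (ENXIO);	/* errno case, no lock taken */

	switch (cmd) {
	case EXAMPLE_GET_UNLOCKED:
		/* Data documented as net-lock protected. */
		NET_LOCK_SHARED();
		error = example_read(cmd, data, ifp);
		NET_UNLOCK_SHARED();
		break;
	case EXAMPLE_GET_LOCKED:
		/* Not audited yet: keep the kernel lock, taken locally. */
		KERNEL_LOCK();
		NET_LOCK_SHARED();
		error = example_read(cmd, data, ifp);
		NET_UNLOCK_SHARED();
		KERNEL_UNLOCK();
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

The lock order (kernel lock outside the shared net lock) matches what
in6_ioctl_get() and nd6_ioctl() do in the diff.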

diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c
index becfaedc8fb..fb4aabf87b5 100644
--- a/sys/netinet6/in6.c
+++ b/sys/netinet6/in6.c
@@ -213,9 +213,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp)
                break;
 #endif /* MROUTING */
        default:
-               KERNEL_LOCK();
                error = in6_ioctl(cmd, data, ifp, privileged);
-               KERNEL_UNLOCK();
                break;
        }
 
@@ -225,23 +223,28 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp)
 int
 in6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp, int privileged)
 {
+       int error;
+
        if (ifp == NULL)
                return (ENXIO);
 
        switch (cmd) {
        case SIOCGIFINFO_IN6:
        case SIOCGNBRINFO_IN6:
-               return (nd6_ioctl(cmd, data, ifp));
+               return nd6_ioctl(cmd, data, ifp);
        case SIOCGIFDSTADDR_IN6:
        case SIOCGIFNETMASK_IN6:
        case SIOCGIFAFLAG_IN6:
        case SIOCGIFALIFETIME_IN6:
-               return (in6_ioctl_get(cmd, data, ifp));
+               return in6_ioctl_get(cmd, data, ifp);
        case SIOCAIFADDR_IN6:
        case SIOCDIFADDR_IN6:
                if (!privileged)
                        return (EPERM);
-               return (in6_ioctl_change_ifaddr(cmd, data, ifp));
+               KERNEL_LOCK();
+               error = in6_ioctl_change_ifaddr(cmd, data, ifp);
+               KERNEL_UNLOCK();
+               return (error);
        case SIOCSIFADDR:
        case SIOCSIFDSTADDR:
        case SIOCSIFBRDADDR:
@@ -422,6 +425,7 @@ in6_ioctl_get(u_long cmd, caddr_t data, struct ifnet *ifp)
                        return (error);
        }
 
+       KERNEL_LOCK();
        NET_LOCK_SHARED();
 
        if (sa6 != NULL) {
@@ -517,6 +521,7 @@ in6_ioctl_get(u_long cmd, caddr_t data, struct ifnet *ifp)
 
 err:
        NET_UNLOCK_SHARED();
+       KERNEL_UNLOCK();
        return (error);
 }
 
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index d6ccfd3a272..a657b81f5cf 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -1023,6 +1023,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
                struct in6_addr nb_addr = nbi->addr; /* make local for safety */
                time_t expire;
 
+               KERNEL_LOCK();
                NET_LOCK_SHARED();
                /*
                 * XXX: KAME specific hack for scoped addresses
@@ -1041,6 +1042,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
                    (ln = (struct llinfo_nd6 *)rt->rt_llinfo) == NULL) {
                        rtfree(rt);
                        NET_UNLOCK_SHARED();
+                       KERNEL_UNLOCK();
                        return (EINVAL);
                }
                expire = ln->ln_rt->rt_expire;
@@ -1056,6 +1058,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
 
                rtfree(rt);
                NET_UNLOCK_SHARED();
+               KERNEL_UNLOCK();
                return (0);
        }
        }
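
To poke at the now-unlocked SIOCGIFINFO_IN6 path from userland, something
along these lines should do.  This is an untested sketch modelled on what
ndp(8) does, with error handling kept minimal and the printed fields
(reachable, retrans) assumed from struct nd_ifinfo in nd6.h:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet6/nd6.h>

#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	struct in6_ndireq nd;
	int s;

	if (argc != 2)
		errx(1, "usage: %s interface", argv[0]);

	if ((s = socket(AF_INET6, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&nd, 0, sizeof(nd));
	if (strlcpy(nd.ifname, argv[1], sizeof(nd.ifname)) >=
	    sizeof(nd.ifname))
		errx(1, "interface name too long");

	/* Read-only ioctl; with the diff above it no longer takes the kernel lock. */
	if (ioctl(s, SIOCGIFINFO_IN6, &nd) == -1)
		err(1, "SIOCGIFINFO_IN6");

	printf("reachable=%us retrans=%ums\n", nd.ndi.reachable,
	    nd.ndi.retrans);

	close(s);
	return (0);
}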
