Re: have m_copydata use a void * instead of caddr_t

2021-02-23 Thread David Gwynne
On Tue, Feb 23, 2021 at 01:09:06PM +0100, Alexander Bluhm wrote:
> On Tue, Feb 23, 2021 at 07:31:30PM +1000, David Gwynne wrote:
> > i'm not a fan of having to cast to caddr_t when we have modern
> > inventions like void *s we can take advantage of.
> 
> Should you remove all the (caddr_t) casts in the callers then?

i asked coccinelle to have a go, but it has terrible ideas about how to
format code.

> Without that step this diff does not provide more consistency.

it's a start though.  cocci and i came up with this to push in after.

Index: arch/armv7/sunxi/sxie.c
===
RCS file: /cvs/src/sys/arch/armv7/sunxi/sxie.c,v
retrieving revision 1.29
diff -u -p -r1.29 sxie.c
--- arch/armv7/sunxi/sxie.c 10 Jul 2020 13:26:36 -  1.29
+++ arch/armv7/sunxi/sxie.c 24 Feb 2021 06:19:13 -
@@ -524,7 +524,7 @@ sxie_start(struct ifnet *ifp)
SXIWRITE4(sc, SXIE_TXPKTLEN0 + (fifo * 4), m->m_pkthdr.len);
 
/* copy the actual packet to fifo XXX through 'align buffer' */
-   m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)td);
+   m_copydata(m, 0, m->m_pkthdr.len, td);
bus_space_write_multi_4(sc->sc_iot, sc->sc_ioh,
SXIE_TXIO0,
(uint32_t *)td, SXIE_ROUNDUP(m->m_pkthdr.len, 4) >> 2);
Index: arch/octeon/dev/octcrypto.c
===
RCS file: /cvs/src/sys/arch/octeon/dev/octcrypto.c,v
retrieving revision 1.3
diff -u -p -r1.3 octcrypto.c
--- arch/octeon/dev/octcrypto.c 10 Mar 2019 14:20:44 -  1.3
+++ arch/octeon/dev/octcrypto.c 24 Feb 2021 06:19:13 -
@@ -739,7 +739,7 @@ octcrypto_authenc_gmac(struct cryptop *c
} else {
if (crp->crp_flags & CRYPTO_F_IMBUF)
m_copydata((struct mbuf *)crp->crp_buf,
-   crde->crd_inject, ivlen, (uint8_t *)iv);
+   crde->crd_inject, ivlen, iv);
else
cuio_copydata((struct uio *)crp->crp_buf,
crde->crd_inject, ivlen, (uint8_t *)iv);
@@ -1035,10 +1035,8 @@ octcrypto_authenc_hmac(struct cryptop *c
memcpy(iv, crde->crd_iv, ivlen);
} else {
if (crp->crp_flags & CRYPTO_F_IMBUF)
-   m_copydata(
-   (struct mbuf *)crp->crp_buf,
-   crde->crd_inject, ivlen,
-   (uint8_t *)iv);
+   m_copydata((struct mbuf *)crp->crp_buf,
+   crde->crd_inject, ivlen, iv);
else
cuio_copydata(
(struct uio *)crp->crp_buf,
Index: dev/ic/acx.c
===
RCS file: /cvs/src/sys/dev/ic/acx.c,v
retrieving revision 1.124
diff -u -p -r1.124 acx.c
--- dev/ic/acx.c10 Jul 2020 13:26:37 -  1.124
+++ dev/ic/acx.c24 Feb 2021 06:19:13 -
@@ -2373,7 +2373,7 @@ acx_set_probe_resp_tmplt(struct acx_soft
IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid);
*(u_int16_t *)wh->i_seq = 0;
 
-   m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)&resp.data);
+   m_copydata(m, 0, m->m_pkthdr.len, &resp.data);
len = m->m_pkthdr.len + sizeof(resp.size);
m_freem(m); 
 
@@ -2427,7 +2427,7 @@ acx_set_beacon_tmplt(struct acx_softc *s
return (1);
}
 
-   m_copydata(m, 0, off, (caddr_t)&beacon.data);
+   m_copydata(m, 0, off, &beacon.data);
len = off + sizeof(beacon.size);
 
if (acx_set_tmplt(sc, ACXCMD_TMPLT_BEACON, &beacon, len) != 0) {
@@ -2442,7 +2442,7 @@ acx_set_beacon_tmplt(struct acx_softc *s
return (0);
}
 
-   m_copydata(m, off, len, (caddr_t));
+   m_copydata(m, off, len, );
len += sizeof(beacon.size);
m_freem(m);
 
Index: dev/ic/an.c
===
RCS file: /cvs/src/sys/dev/ic/an.c,v
retrieving revision 1.77
diff -u -p -r1.77 an.c
--- dev/ic/an.c 8 Dec 2020 04:37:27 -   1.77
+++ dev/ic/an.c 24 Feb 2021 06:19:13 -
@@ -781,7 +781,7 @@ an_mwrite_bap(struct an_softc *sc, int i
len = min(m->m_len, totlen);
 
if ((mtod(m, u_long) & 0x1) || (len & 0x1)) {
-   m_copydata(m, 0, totlen, (caddr_t)&sc->sc_buf.sc_txbuf);
+   m_copydata(m, 0, totlen, &sc->sc_buf.sc_txbuf);
cnt = (totlen + 1) / 2;
an_swap16((u_int16_t *)&sc->sc_buf.sc_txbuf, cnt);
CSR_WRITE_MULTI_STREAM_2(sc, AN_DATA0,
@@ -1126,7 +1126,7 @@ 

mg: fix direction of mark-paragraph

2021-02-23 Thread Mark Lumsden

The current direction of marking paragraphs using 'mark-paragraph' in mg
is the opposite of emacs. Emacs goes backwards, mg goes forwards. This 
diff brings mg inline with emacs, and also simplifies the code, and fixes 
another bug with the current code.


ok?

Index: paragraph.c
===
RCS file: /cvs/src/usr.bin/mg/paragraph.c,v
retrieving revision 1.46
diff -u -p -u -p -r1.46 paragraph.c
--- paragraph.c 17 Nov 2018 09:52:34 -  1.46
+++ paragraph.c 23 Feb 2021 20:32:43 -
@@ -302,23 +302,16 @@ killpara(int f, int n)
 int
 markpara(int f, int n)
 {
-   int i = 0;
-
if (n == 0)
return (TRUE);

clearmark(FFARG, 0);

-   if (findpara() == FALSE)
-   return (TRUE);
-
-   (void)do_gotoeop(FFRAND, n, &i);
-
/* set the mark here */
curwp->w_markp = curwp->w_dotp;
curwp->w_marko = curwp->w_doto;

-   (void)gotobop(FFRAND, i);
+   (void)gotobop(FFRAND, n);

return (TRUE);
 }



Re: rpki-client don't clobber poll events

2021-02-23 Thread Theo de Raadt
Definitely!  It will round-trip through poll() less often.

Claudio Jeker  wrote:

> It is perfectly fine to wait for read and write at the same time.
> The code in rpki-client should do that too, I think it will not matter
> but it is what I intended.
> 
> -- 
> :wq Claudio
> 
> Index: main.c
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/main.c,v
> retrieving revision 1.105
> diff -u -p -r1.105 main.c
> --- main.c23 Feb 2021 14:25:29 -  1.105
> +++ main.c23 Feb 2021 16:00:23 -
> @@ -984,10 +984,10 @@ main(int argc, char *argv[])
>   while (entity_queue > 0 && !killme) {
>   pfd[0].events = POLLIN;
>   if (rsyncq.queued)
> - pfd[0].events = POLLOUT;
> + pfd[0].events |= POLLOUT;
>   pfd[1].events = POLLIN;
>   if (procq.queued)
> - pfd[1].events = POLLOUT;
> + pfd[1].events |= POLLOUT;
>  
>   if ((c = poll(pfd, 2, INFTIM)) == -1) {
>   if (errno == EINTR)
> 



rpki-client don't clobber poll events

2021-02-23 Thread Claudio Jeker
It is perfectly fine to wait for read and write at the same time.
The code in rpki-client should do that too, I think it will not matter
but it is what I intended.

-- 
:wq Claudio

Index: main.c
===
RCS file: /cvs/src/usr.sbin/rpki-client/main.c,v
retrieving revision 1.105
diff -u -p -r1.105 main.c
--- main.c  23 Feb 2021 14:25:29 -  1.105
+++ main.c  23 Feb 2021 16:00:23 -
@@ -984,10 +984,10 @@ main(int argc, char *argv[])
while (entity_queue > 0 && !killme) {
pfd[0].events = POLLIN;
if (rsyncq.queued)
-   pfd[0].events = POLLOUT;
+   pfd[0].events |= POLLOUT;
pfd[1].events = POLLIN;
if (procq.queued)
-   pfd[1].events = POLLOUT;
+   pfd[1].events |= POLLOUT;
 
if ((c = poll(pfd, 2, INFTIM)) == -1) {
if (errno == EINTR)



Re: sparc64/clock.c: use ANSI-style function definitions

2021-02-23 Thread Theo de Raadt
Scott Cheloha  wrote:

> Any reason not to ANSIfy these?  While we're here we can kill some
> ARGUSED comments, too.

Absolutely, go ahead with that.

> I don't have a sparc64 machine so I'd appreciate a test compile.

It compiles fine.



sparc64/clock.c: use ANSI-style function definitions

2021-02-23 Thread Scott Cheloha
I'm poking around in this file and lo, K&R function definitions.

Any reason not to ANSIfy these?  While we're here we can kill some
ARGUSED comments, too.

I don't have a sparc64 machine so I'd appreciate a test compile.

Assuming it compiles, ok?

Index: clock.c
===
RCS file: /cvs/src/sys/arch/sparc64/sparc64/clock.c,v
retrieving revision 1.68
diff -u -p -r1.68 clock.c
--- clock.c 23 Feb 2021 04:44:31 -  1.68
+++ clock.c 23 Feb 2021 15:12:31 -
@@ -215,10 +215,7 @@ int timerblurb = 10; /* Guess a value; u
  * own special match function to call it the "clock".
  */
 static int
-clockmatch_sbus(parent, cf, aux)
-   struct device *parent;
-   void *cf;
-   void *aux;
+clockmatch_sbus(struct device *parent, void *cf, void *aux)
 {
struct sbus_attach_args *sa = aux;
 
@@ -226,10 +223,7 @@ clockmatch_sbus(parent, cf, aux)
 }
 
 static int
-clockmatch_ebus(parent, cf, aux)
-   struct device *parent;
-   void *cf;
-   void *aux;
+clockmatch_ebus(struct device *parent, void *cf, void *aux)
 {
struct ebus_attach_args *ea = aux;
 
@@ -237,10 +231,7 @@ clockmatch_ebus(parent, cf, aux)
 }
 
 static int
-clockmatch_fhc(parent, cf, aux)
-   struct device *parent;
-   void *cf;
-   void *aux;
+clockmatch_fhc(struct device *parent, void *cf, void *aux)
 {
struct fhc_attach_args *fa = aux;
 
@@ -270,11 +261,8 @@ clockmatch_fhc(parent, cf, aux)
  * a non-trivial operation.  
  */
 
-/* ARGSUSED */
 static void
-clockattach_sbus(parent, self, aux)
-   struct device *parent, *self;
-   void *aux;
+clockattach_sbus(struct device *parent, struct device *self, void *aux)
 {
struct sbus_attach_args *sa = aux;
bus_space_tag_t bt = sa->sa_bustag;
@@ -309,9 +297,7 @@ clockattach_sbus(parent, self, aux)
  * change are not atomic.
  */
 int
-clock_bus_wenable(handle, onoff)
-   struct todr_chip_handle *handle;
-   int onoff;
+clock_bus_wenable(struct todr_chip_handle *handle, int onoff)
 {
int s, err = 0;
int prot; /* nonzero => change prot */
@@ -335,11 +321,8 @@ clock_bus_wenable(handle, onoff)
return (err);
 }
 
-/* ARGSUSED */
 static void
-clockattach_ebus(parent, self, aux)
-   struct device *parent, *self;
-   void *aux;
+clockattach_ebus(struct device *parent, struct device *self, void *aux)
 {
struct ebus_attach_args *ea = aux;
bus_space_tag_t bt;
@@ -380,9 +363,7 @@ clockattach_ebus(parent, self, aux)
 }
 
 static void
-clockattach_fhc(parent, self, aux)
-   struct device *parent, *self;
-   void *aux;
+clockattach_fhc(struct device *parent, struct device *self, void *aux)
 {
struct fhc_attach_args *fa = aux;
bus_space_tag_t bt = fa->fa_bustag;
@@ -409,10 +390,7 @@ clockattach_fhc(parent, self, aux)
 }
 
 static void
-clockattach(node, bt, bh)
-   int node;
-   bus_space_tag_t bt;
-   bus_space_handle_t bh;
+clockattach(int node, bus_space_tag_t bt, bus_space_handle_t bh)
 {
char *model;
struct idprom *idp;
@@ -467,10 +445,7 @@ getidprom(void)
  * the lame UltraSPARC IIi PCI machines that don't have them.
  */
 static int
-timermatch(parent, cf, aux)
-   struct device *parent;
-   void *cf;
-   void *aux;
+timermatch(struct device *parent, void *cf, void *aux)
 {
 #ifndef MULTIPROCESSOR
struct mainbus_attach_args *ma = aux;
@@ -483,9 +458,7 @@ timermatch(parent, cf, aux)
 }
 
 static void
-timerattach(parent, self, aux)
-   struct device *parent, *self;
-   void *aux;
+timerattach(struct device *parent, struct device *self, void *aux)
 {
struct mainbus_attach_args *ma = aux;
u_int *va = ma->ma_address;
@@ -518,8 +491,7 @@ timerattach(parent, self, aux)
 }
 
 void
-stopcounter(creg)
-   struct timer_4u *creg;
+stopcounter(struct timer_4u *creg)
 {
/* Stop the clock */
volatile int discard;
@@ -531,8 +503,7 @@ stopcounter(creg)
  * XXX this belongs elsewhere
  */
 void
-myetheraddr(cp)
-   u_char *cp;
+myetheraddr(u_char *cp)
 {
struct idprom *idp;
 
@@ -714,10 +685,8 @@ cpu_initclocks(void)
 /*
  * Dummy setstatclockrate(), since we know profhz==hz.
  */
-/* ARGSUSED */
 void
-setstatclockrate(newhz)
-   int newhz;
+setstatclockrate(int newhz)
 {
/* nothing */
 }
@@ -731,8 +700,7 @@ setstatclockrate(newhz)
 static int clockcheck = 0;
 #endif
 int
-clockintr(cap)
-   void *cap;
+clockintr(void *cap)
 {
 #ifdef DEBUG
static int64_t tick_base = 0;
@@ -778,8 +746,7 @@ clockintr(cap)
  * locore.s to a level 10.
  */
 int
-tickintr(cap)
-   void *cap;
+tickintr(void *cap)
 {
struct cpu_info *ci = curcpu();
u_int64_t s;
@@ -803,8 +770,7 @@ tickintr(cap)
 }
 
 int
-sys_tickintr(cap)
-   void *cap;
+sys_tickintr(void *cap)
 {
struct cpu_info *ci = curcpu();
u_int64_t s;
@@ -827,8 +793,7 @@ sys_tickintr(cap)
 }
 
 int

Re: rpki-client lock down rsync process further

2021-02-23 Thread Theo de Raadt
OK with me.

I'll say it again, the unveils in here are misguided.  Almost as
misguided as the mmap's (which prevents large file transfer, and
there are other problems..)


Claudio Jeker  wrote:

> There is no need for cpath or the unveil of . in the rsync process.
> That process just does fork+exec for rsync.
> Removing the unveil pledge is the same as unveil(NULL, NULL) so skip that
> too.
> 
> OK?
> -- 
> :wq Claudio
> 
> Index: main.c
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/main.c,v
> retrieving revision 1.104
> diff -u -p -r1.104 main.c
> --- main.c22 Feb 2021 09:46:05 -  1.104
> +++ main.c23 Feb 2021 10:42:24 -
> @@ -941,8 +941,7 @@ main(int argc, char *argv[])
>   if (fchdir(cachefd) == -1)
>   err(1, "fchdir");
>  
> - if (pledge("stdio rpath cpath proc exec unveil", NULL)
> - == -1)
> + if (pledge("stdio rpath proc exec unveil", NULL) == -1)
>   err(1, "pledge");
>  
>   proc_rsync(rsync_prog, bind_addr, fd[0]);
> Index: rsync.c
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/rsync.c,v
> retrieving revision 1.18
> diff -u -p -r1.18 rsync.c
> --- rsync.c   19 Feb 2021 08:14:49 -  1.18
> +++ rsync.c   23 Feb 2021 10:41:50 -
> @@ -160,13 +160,6 @@ proc_rsync(char *prog, char *bind_addr, 
>   } else if (unveil(prog, "x") == -1)
>   err(1, "%s: unveil", prog);
>  
> - /* Unveil the repository directory and terminate unveiling. */
> -
> - if (unveil(".", "c") == -1)
> - err(1, "unveil");
> - if (unveil(NULL, NULL) == -1)
> - err(1, "unveil");
> -
>   if (pledge("stdio proc exec", NULL) == -1)
>   err(1, "pledge");
>  
> 



Re: have m_copydata use a void * instead of caddr_t

2021-02-23 Thread Alexander Bluhm
On Tue, Feb 23, 2021 at 07:31:30PM +1000, David Gwynne wrote:
> i'm not a fan of having to cast to caddr_t when we have modern
> inventions like void *s we can take advantage of.

Should you remove all the (caddr_t) casts in the callers then?
Without that step this diff does not provide more consistency.

bluhm

> ok?
> 
> Index: share/man/man9/mbuf.9
> ===
> RCS file: /cvs/src/share/man/man9/mbuf.9,v
> retrieving revision 1.120
> diff -u -p -r1.120 mbuf.9
> --- share/man/man9/mbuf.9 12 Dec 2020 11:48:52 -  1.120
> +++ share/man/man9/mbuf.9 23 Feb 2021 09:29:55 -
> @@ -116,7 +116,7 @@
>  .Ft void
>  .Fn m_reclaim "void"
>  .Ft void
> -.Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
> +.Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
>  .Ft void
>  .Fn m_cat "struct mbuf *m" "struct mbuf *n"
>  .Ft struct mbuf *
> @@ -673,7 +673,7 @@ is a
>  pointer, no action occurs.
>  .It Fn m_reclaim "void"
>  Ask protocols to free unused memory space.
> -.It Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
> +.It Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
>  Copy data from the mbuf chain pointed to by
>  .Fa m
>  starting at
> Index: sys/sys/mbuf.h
> ===
> RCS file: /cvs/src/sys/sys/mbuf.h,v
> retrieving revision 1.251
> diff -u -p -r1.251 mbuf.h
> --- sys/sys/mbuf.h12 Dec 2020 11:49:02 -  1.251
> +++ sys/sys/mbuf.h23 Feb 2021 09:29:55 -
> @@ -435,7 +435,7 @@ int   m_copyback(struct mbuf *, int, int, 
>  struct mbuf *m_freem(struct mbuf *);
>  void m_purge(struct mbuf *);
>  void m_reclaim(void *, int);
> -void m_copydata(struct mbuf *, int, int, caddr_t);
> +void m_copydata(struct mbuf *, int, int, void *);
>  void m_cat(struct mbuf *, struct mbuf *);
>  struct mbuf *m_devget(char *, int, int);
>  int  m_apply(struct mbuf *, int, int,
> Index: sys/kern/uipc_mbuf.c
> ===
> RCS file: /cvs/src/sys/kern/uipc_mbuf.c,v
> retrieving revision 1.277
> diff -u -p -r1.277 uipc_mbuf.c
> --- sys/kern/uipc_mbuf.c  13 Jan 2021 12:38:36 -  1.277
> +++ sys/kern/uipc_mbuf.c  23 Feb 2021 09:29:55 -
> @@ -711,8 +711,9 @@ nospace:
>   * continuing for "len" bytes, into the indicated buffer.
>   */
>  void
> -m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
> +m_copydata(struct mbuf *m, int off, int len, void *p)
>  {
> + caddr_t cp = p;
>   unsigned count;
>  
>   if (off < 0)



Re: have m_copydata use a void * instead of caddr_t

2021-02-23 Thread Vitaliy Makkoveev
ok mvs@

> On 23 Feb 2021, at 12:31, David Gwynne  wrote:
> 
> i'm not a fan of having to cast to caddr_t when we have modern
> inventions like void *s we can take advantage of.
> 
> ok?
> 
> Index: share/man/man9/mbuf.9
> ===
> RCS file: /cvs/src/share/man/man9/mbuf.9,v
> retrieving revision 1.120
> diff -u -p -r1.120 mbuf.9
> --- share/man/man9/mbuf.9 12 Dec 2020 11:48:52 -  1.120
> +++ share/man/man9/mbuf.9 23 Feb 2021 09:29:55 -
> @@ -116,7 +116,7 @@
> .Ft void
> .Fn m_reclaim "void"
> .Ft void
> -.Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
> +.Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
> .Ft void
> .Fn m_cat "struct mbuf *m" "struct mbuf *n"
> .Ft struct mbuf *
> @@ -673,7 +673,7 @@ is a
> pointer, no action occurs.
> .It Fn m_reclaim "void"
> Ask protocols to free unused memory space.
> -.It Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
> +.It Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
> Copy data from the mbuf chain pointed to by
> .Fa m
> starting at
> Index: sys/sys/mbuf.h
> ===
> RCS file: /cvs/src/sys/sys/mbuf.h,v
> retrieving revision 1.251
> diff -u -p -r1.251 mbuf.h
> --- sys/sys/mbuf.h12 Dec 2020 11:49:02 -  1.251
> +++ sys/sys/mbuf.h23 Feb 2021 09:29:55 -
> @@ -435,7 +435,7 @@ int   m_copyback(struct mbuf *, int, int, 
> struct mbuf *m_freem(struct mbuf *);
> void  m_purge(struct mbuf *);
> void  m_reclaim(void *, int);
> -void m_copydata(struct mbuf *, int, int, caddr_t);
> +void m_copydata(struct mbuf *, int, int, void *);
> void  m_cat(struct mbuf *, struct mbuf *);
> struct mbuf *m_devget(char *, int, int);
> int   m_apply(struct mbuf *, int, int,
> Index: sys/kern/uipc_mbuf.c
> ===
> RCS file: /cvs/src/sys/kern/uipc_mbuf.c,v
> retrieving revision 1.277
> diff -u -p -r1.277 uipc_mbuf.c
> --- sys/kern/uipc_mbuf.c  13 Jan 2021 12:38:36 -  1.277
> +++ sys/kern/uipc_mbuf.c  23 Feb 2021 09:29:55 -
> @@ -711,8 +711,9 @@ nospace:
>  * continuing for "len" bytes, into the indicated buffer.
>  */
> void
> -m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
> +m_copydata(struct mbuf *m, int off, int len, void *p)
> {
> + caddr_t cp = p;
>   unsigned count;
> 
>   if (off < 0)
> 



Different fix for vnode deadlock

2021-02-23 Thread Martin Pieuchot
Page faults on vnode-backed objects commonly end up calling VOP_READ(9)
or VOP_WRITE(9) to go through the buffer cache.  This implies grabbing
an inode lock after any UVM locking.

On the other hand changing the size of a vnode results in entering UVM,
generally via calling uvm_vnp_setsize() with a locked inode.

Syzkaller exposed a deadlock that anton@ fixed in r1.108 of uvm/uvm_vnode.c
by making the page fault path grab the inode lock earlier.  Sadly such
change isn't compatible with a finer locking required to unlock the lower
part of the UVM fault handler.

UVM's code make use of the PG_BUSY flag to ask other threads to not touch
a given page.  This is done to keep the ownership of a page after having
released its associated lock.  This is currently hard to follow because
the locking code has been removed ;)

With the current fix, the PG_BUSY flag is set after grabbing the inode
lock which creates a lock ordering problem with `uobj->vmlock' being
released after setting the flag.

So the diff below takes a different approach, if the thread that faulted
finds that the `inode' is contended it stops there and restart the fault.
This has the side effect of un-PG_BUSY the pages and allows the other
thread to make progress.

This is enough to move forward with `uobj->vmlock' without changing the
interaction between the existing buffer cache and UVM locking (thanks!).

I couldn't trigger the deadlock with regress/sys/uvm/vnode with this
diff. 

Is the explanation clear enough?  Comments?  Oks?

Index: uvm/uvm_vnode.c
===
RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
retrieving revision 1.108
diff -u -p -r1.108 uvm_vnode.c
--- uvm/uvm_vnode.c 26 Oct 2020 19:48:19 -  1.108
+++ uvm/uvm_vnode.c 23 Feb 2021 10:46:50 -
@@ -90,9 +90,6 @@ intuvn_io(struct uvm_vnode *, vm_page
 int uvn_put(struct uvm_object *, vm_page_t *, int, boolean_t);
 voiduvn_reference(struct uvm_object *);
 
-int uvm_vnode_lock(struct uvm_vnode *);
-voiduvm_vnode_unlock(struct uvm_vnode *);
-
 /*
  * master pager structure
  */
@@ -878,16 +875,11 @@ uvn_cluster(struct uvm_object *uobj, vof
 int
 uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
 {
-   struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;
int retval;
 
KERNEL_ASSERT_LOCKED();
 
-   retval = uvm_vnode_lock(uvn);
-   if (retval)
-   return(retval);
-   retval = uvn_io(uvn, pps, npages, flags, UIO_WRITE);
-   uvm_vnode_unlock(uvn);
+   retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE);
 
return(retval);
 }
@@ -905,10 +897,9 @@ int
 uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
 int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
 {
-   struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;
voff_t current_offset;
struct vm_page *ptmp;
-   int lcv, result, gotpages, retval;
+   int lcv, result, gotpages;
boolean_t done;
 
KERNEL_ASSERT_LOCKED();
@@ -983,18 +974,6 @@ uvn_get(struct uvm_object *uobj, voff_t 
}
 
/*
-* Before getting non-resident pages which must be populate with data
-* using I/O on the backing vnode, lock the same vnode. Such pages are
-* about to be allocated and busied (i.e. PG_BUSY) by the current
-* thread. Allocating and busying the page(s) before acquiring the
-* vnode lock could cause a deadlock with uvn_flush() which acquires the
-* vnode lock before waiting on pages to become unbusy and then flushed.
-*/
-   retval = uvm_vnode_lock(uvn);
-   if (retval)
-   return(retval);
-
-   /*
 * step 2: get non-resident or busy pages.
 * data structures are unlocked.
 *
@@ -1080,15 +1059,14 @@ uvn_get(struct uvm_object *uobj, voff_t 
 * we have a "fake/busy/clean" page that we just allocated.  do
 * I/O to fill it with valid data.
 */
-   result = uvn_io(uvn, , 1, PGO_SYNCIO, UIO_READ);
+   result = uvn_io((struct uvm_vnode *) uobj, , 1,
+   PGO_SYNCIO|PGO_NOWAIT, UIO_READ);
 
/*
 * I/O done.  because we used syncio the result can not be
 * PEND or AGAIN.
 */
if (result != VM_PAGER_OK) {
-   uvm_vnode_unlock(uvn);
-
if (ptmp->pg_flags & PG_WANTED)
wakeup(ptmp);
 
@@ -1119,15 +1097,12 @@ uvn_get(struct uvm_object *uobj, voff_t 
 
}
 
-   uvm_vnode_unlock(uvn);
-
return (VM_PAGER_OK);
 }
 
 /*
  * uvn_io: do I/O to a vnode
  *
- * => uvn: the backing vnode must be locked
  * => prefer map unlocked (not required)
  * => flags: PGO_SYNCIO -- use sync. I/O
  * => XXX: 

rpki-client lock down rsync process further

2021-02-23 Thread Claudio Jeker
There is no need for cpath or the unveil of . in the rsync process.
That process just does fork+exec for rsync.
Removing the unveil pledge is the same as unveil(NULL, NULL) so skip that
too.

OK?
-- 
:wq Claudio

Index: main.c
===
RCS file: /cvs/src/usr.sbin/rpki-client/main.c,v
retrieving revision 1.104
diff -u -p -r1.104 main.c
--- main.c  22 Feb 2021 09:46:05 -  1.104
+++ main.c  23 Feb 2021 10:42:24 -
@@ -941,8 +941,7 @@ main(int argc, char *argv[])
if (fchdir(cachefd) == -1)
err(1, "fchdir");
 
-   if (pledge("stdio rpath cpath proc exec unveil", NULL)
-   == -1)
+   if (pledge("stdio rpath proc exec unveil", NULL) == -1)
err(1, "pledge");
 
proc_rsync(rsync_prog, bind_addr, fd[0]);
Index: rsync.c
===
RCS file: /cvs/src/usr.sbin/rpki-client/rsync.c,v
retrieving revision 1.18
diff -u -p -r1.18 rsync.c
--- rsync.c 19 Feb 2021 08:14:49 -  1.18
+++ rsync.c 23 Feb 2021 10:41:50 -
@@ -160,13 +160,6 @@ proc_rsync(char *prog, char *bind_addr, 
} else if (unveil(prog, "x") == -1)
err(1, "%s: unveil", prog);
 
-   /* Unveil the repository directory and terminate unveiling. */
-
-   if (unveil(".", "c") == -1)
-   err(1, "unveil");
-   if (unveil(NULL, NULL) == -1)
-   err(1, "unveil");
-
if (pledge("stdio proc exec", NULL) == -1)
err(1, "pledge");
 



allow xlock -dpms* values < 30 and > 0

2021-02-23 Thread Alex Raschi
Hi,

I noticed that xlock does not allow values between 30 and 0 for options
-dpms{standby,suspend,off}, the man page suggests that all values >= 0
are accepted. I use the blank mode and i would like to set a lower
timeout so i made a patch to remove the limit.

ok?

Index: app/xlockmore/xlock/xlock.c
===
RCS file: /cvs/xenocara/app/xlockmore/xlock/xlock.c,v
retrieving revision 1.2
diff -u -p -r1.2 xlock.c
--- app/xlockmore/xlock/xlock.c 26 Nov 2006 17:13:23 -  1.2
+++ app/xlockmore/xlock/xlock.c 20 Feb 2021 15:37:47 -
@@ -624,7 +624,6 @@ extern int  XHPEnableReset(Display * dsp
 
 #endif
 #ifdef USE_DPMS
-#define MIN_DPMS 30/* 30 second minimum */
 #if 1
 #include 
 #include 
@@ -1114,9 +1113,9 @@ SetDPMS(Display * display, int nstandby,
DPMSSetTimeouts(display, standby, suspend, off);
else
DPMSSetTimeouts(display,
-   (nstandby <= 0 ? 0 : (nstandby > 
MIN_DPMS ? nstandby : MIN_DPMS)),
-   (nsuspend <= 0 ? 0 : (nsuspend > 
MIN_DPMS ? nsuspend : MIN_DPMS)),
-   (noff <= 0 ? 0 : (noff > MIN_DPMS ? 
noff : MIN_DPMS)));
+   (nstandby <= 0 ? 0 : nstandby),
+   (nsuspend <= 0 ? 0 : nsuspend),
+   (noff <= 0 ? 0 : noff ));
}
 }



have m_copydata use a void * instead of caddr_t

2021-02-23 Thread David Gwynne
i'm not a fan of having to cast to caddr_t when we have modern
inventions like void *s we can take advantage of.

ok?

Index: share/man/man9/mbuf.9
===
RCS file: /cvs/src/share/man/man9/mbuf.9,v
retrieving revision 1.120
diff -u -p -r1.120 mbuf.9
--- share/man/man9/mbuf.9   12 Dec 2020 11:48:52 -  1.120
+++ share/man/man9/mbuf.9   23 Feb 2021 09:29:55 -
@@ -116,7 +116,7 @@
 .Ft void
 .Fn m_reclaim "void"
 .Ft void
-.Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
+.Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
 .Ft void
 .Fn m_cat "struct mbuf *m" "struct mbuf *n"
 .Ft struct mbuf *
@@ -673,7 +673,7 @@ is a
 pointer, no action occurs.
 .It Fn m_reclaim "void"
 Ask protocols to free unused memory space.
-.It Fn m_copydata "struct mbuf *m" "int off" "int len" "caddr_t cp"
+.It Fn m_copydata "struct mbuf *m" "int off" "int len" "void *cp"
 Copy data from the mbuf chain pointed to by
 .Fa m
 starting at
Index: sys/sys/mbuf.h
===
RCS file: /cvs/src/sys/sys/mbuf.h,v
retrieving revision 1.251
diff -u -p -r1.251 mbuf.h
--- sys/sys/mbuf.h  12 Dec 2020 11:49:02 -  1.251
+++ sys/sys/mbuf.h  23 Feb 2021 09:29:55 -
@@ -435,7 +435,7 @@ int m_copyback(struct mbuf *, int, int, 
 struct mbuf *m_freem(struct mbuf *);
 void   m_purge(struct mbuf *);
 void   m_reclaim(void *, int);
-void   m_copydata(struct mbuf *, int, int, caddr_t);
+void   m_copydata(struct mbuf *, int, int, void *);
 void   m_cat(struct mbuf *, struct mbuf *);
 struct mbuf *m_devget(char *, int, int);
 intm_apply(struct mbuf *, int, int,
Index: sys/kern/uipc_mbuf.c
===
RCS file: /cvs/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.277
diff -u -p -r1.277 uipc_mbuf.c
--- sys/kern/uipc_mbuf.c13 Jan 2021 12:38:36 -  1.277
+++ sys/kern/uipc_mbuf.c23 Feb 2021 09:29:55 -
@@ -711,8 +711,9 @@ nospace:
  * continuing for "len" bytes, into the indicated buffer.
  */
 void
-m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
+m_copydata(struct mbuf *m, int off, int len, void *p)
 {
+   caddr_t cp = p;
unsigned count;
 
if (off < 0)



Re: uvm_fault_lower refactoring

2021-02-23 Thread Mark Kettenis
> Date: Tue, 23 Feb 2021 10:07:43 +0100
> From: Martin Pieuchot 
> 
> On 23/02/21(Tue) 00:24, Mark Kettenis wrote:
> > > Date: Mon, 22 Feb 2021 10:10:21 +0100
> > > From: Martin Pieuchot 
> > > 
> > > On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > > > Start by moving `pgo_fault' handler outside of uvm_fault_lower().
> > > > 
> > > > If a page has a backing object that prefer to handler to fault itself
> > > > the locking will be different, so keep it under KERNEL_LOCK() for the
> > > > moment and make it separate from the rest of uvm_fault_lower().
> > > > 
> > > > ok?
> > > 
> > > Anyone?
> > 
> > This diverges from NetBSD; you think that's a good idea?
> 
> Which tree are you looking at?  I'm doing this exactly to reduce
> differences with NetBSD.  r1.228 of uvm_fault.c in NetBSD contain this
> logic in uvm_fault_internal().

Was looking at r1.221.  So go ahead then.

> > > > Index: uvm/uvm_fault.c
> > > > ===
> > > > RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> > > > retrieving revision 1.115
> > > > diff -u -p -r1.115 uvm_fault.c
> > > > --- uvm/uvm_fault.c 16 Feb 2021 09:10:17 -  1.115
> > > > +++ uvm/uvm_fault.c 16 Feb 2021 10:13:58 -
> > > > @@ -598,10 +598,37 @@ uvm_fault(vm_map_t orig_map, vaddr_t vad
> > > > /* case 1: fault on an anon in our amap */
> > > > error = uvm_fault_upper(, , anons, 
> > > > fault_type);
> > > > } else {
> > > > -   /* case 2: fault on backing object or zero fill 
> > > > */
> > > > -   KERNEL_LOCK();
> > > > -   error = uvm_fault_lower(, , pages, 
> > > > fault_type);
> > > > -   KERNEL_UNLOCK();
> > > > +   struct uvm_object *uobj = 
> > > > ufi.entry->object.uvm_obj;
> > > > +
> > > > +   /*
> > > > +* if the desired page is not shadowed by the 
> > > > amap and
> > > > +* we have a backing object, then we check to 
> > > > see if
> > > > +* the backing object would prefer to handle 
> > > > the fault
> > > > +* itself (rather than letting us do it with 
> > > > the usual
> > > > +* pgo_get hook).  the backing object signals 
> > > > this by
> > > > +* providing a pgo_fault routine.
> > > > +*/
> > > > +   if (uobj != NULL && uobj->pgops->pgo_fault != 
> > > > NULL) {
> > > > +   KERNEL_LOCK();
> > > > +   error = uobj->pgops->pgo_fault(,
> > > > +   flt.startva, pages, flt.npages,
> > > > +   flt.centeridx, fault_type, 
> > > > flt.access_type,
> > > > +   PGO_LOCKED);
> > > > +   KERNEL_UNLOCK();
> > > > +
> > > > +   if (error == VM_PAGER_OK)
> > > > +   error = 0;
> > > > +   else if (error == VM_PAGER_REFAULT)
> > > > +   error = ERESTART;
> > > > +   else
> > > > +   error = EACCES;
> > > > +   } else {
> > > > +   /* case 2: fault on backing obj or zero 
> > > > fill */
> > > > +   KERNEL_LOCK();
> > > > +   error = uvm_fault_lower(, , 
> > > > pages,
> > > > +   fault_type);
> > > > +   KERNEL_UNLOCK();
> > > > +   }
> > > > }
> > > > }
> > > >  
> > > > @@ -1054,26 +1081,6 @@ uvm_fault_lower(struct uvm_faultinfo *uf
> > > > struct vm_anon *anon = NULL;
> > > > vaddr_t currva;
> > > > voff_t uoff;
> > > > -
> > > > -   /*
> > > > -* if the desired page is not shadowed by the amap and we have a
> > > > -* backing object, then we check to see if the backing object 
> > > > would
> > > > -* prefer to handle the fault itself (rather than letting us do 
> > > > it
> > > > -* with the usual pgo_get hook).  the backing object signals 
> > > > this by
> > > > -* providing a pgo_fault routine.
> > > > -*/
> > > > -   if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
> > > > -   result = uobj->pgops->pgo_fault(ufi, flt->startva, 
> > > > pages,
> > > > -   flt->npages, flt->centeridx, fault_type, 
> > > > flt->access_type,
> > > > -   PGO_LOCKED);
> > > > -
> > > > -   if (result == VM_PAGER_OK)
> > > > -   return (0); /* pgo_fault did pmap 
> > > > enter */
> > > > -   

Re: uvm_fault_lower refactoring

2021-02-23 Thread Martin Pieuchot
On 23/02/21(Tue) 00:24, Mark Kettenis wrote:
> > Date: Mon, 22 Feb 2021 10:10:21 +0100
> > From: Martin Pieuchot 
> > 
> > On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > > Start by moving `pgo_fault' handler outside of uvm_fault_lower().
> > > 
> > > If a page has a backing object that prefer to handler to fault itself
> > > the locking will be different, so keep it under KERNEL_LOCK() for the
> > > moment and make it separate from the rest of uvm_fault_lower().
> > > 
> > > ok?
> > 
> > Anyone?
> 
> This diverges from NetBSD; you think that's a good idea?

Which tree are you looking at?  I'm doing this exactly to reduce
differences with NetBSD.  r1.228 of uvm_fault.c in NetBSD contain this
logic in uvm_fault_internal().

> > > Index: uvm/uvm_fault.c
> > > ===
> > > RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> > > retrieving revision 1.115
> > > diff -u -p -r1.115 uvm_fault.c
> > > --- uvm/uvm_fault.c   16 Feb 2021 09:10:17 -  1.115
> > > +++ uvm/uvm_fault.c   16 Feb 2021 10:13:58 -
> > > @@ -598,10 +598,37 @@ uvm_fault(vm_map_t orig_map, vaddr_t vad
> > >   /* case 1: fault on an anon in our amap */
> > >   error = uvm_fault_upper(, , anons, fault_type);
> > >   } else {
> > > - /* case 2: fault on backing object or zero fill */
> > > - KERNEL_LOCK();
> > > - error = uvm_fault_lower(, , pages, fault_type);
> > > - KERNEL_UNLOCK();
> > > + struct uvm_object *uobj = ufi.entry->object.uvm_obj;
> > > +
> > > + /*
> > > +  * if the desired page is not shadowed by the amap and
> > > +  * we have a backing object, then we check to see if
> > > +  * the backing object would prefer to handle the fault
> > > +  * itself (rather than letting us do it with the usual
> > > +  * pgo_get hook).  the backing object signals this by
> > > +  * providing a pgo_fault routine.
> > > +  */
> > > + if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
> > > + KERNEL_LOCK();
> > > + error = uobj->pgops->pgo_fault(,
> > > + flt.startva, pages, flt.npages,
> > > + flt.centeridx, fault_type, flt.access_type,
> > > + PGO_LOCKED);
> > > + KERNEL_UNLOCK();
> > > +
> > > + if (error == VM_PAGER_OK)
> > > + error = 0;
> > > + else if (error == VM_PAGER_REFAULT)
> > > + error = ERESTART;
> > > + else
> > > + error = EACCES;
> > > + } else {
> > > + /* case 2: fault on backing obj or zero fill */
> > > + KERNEL_LOCK();
> > > + error = uvm_fault_lower(, , pages,
> > > + fault_type);
> > > + KERNEL_UNLOCK();
> > > + }
> > >   }
> > >   }
> > >  
> > > @@ -1054,26 +1081,6 @@ uvm_fault_lower(struct uvm_faultinfo *uf
> > >   struct vm_anon *anon = NULL;
> > >   vaddr_t currva;
> > >   voff_t uoff;
> > > -
> > > - /*
> > > -  * if the desired page is not shadowed by the amap and we have a
> > > -  * backing object, then we check to see if the backing object would
> > > -  * prefer to handle the fault itself (rather than letting us do it
> > > -  * with the usual pgo_get hook).  the backing object signals this by
> > > -  * providing a pgo_fault routine.
> > > -  */
> > > - if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
> > > - result = uobj->pgops->pgo_fault(ufi, flt->startva, pages,
> > > - flt->npages, flt->centeridx, fault_type, flt->access_type,
> > > - PGO_LOCKED);
> > > -
> > > - if (result == VM_PAGER_OK)
> > > - return (0); /* pgo_fault did pmap enter */
> > > - else if (result == VM_PAGER_REFAULT)
> > > - return ERESTART;/* try again! */
> > > - else
> > > - return (EACCES);
> > > - }
> > >  
> > >   /*
> > >* now, if the desired page is not shadowed by the amap and we have
> > > 
> > 
> >