[hackers] [PATCH] [ubase] dd: Use sigaction(2) to obviate select(2)

2017-09-09 Thread Eric Pruitt
Setting the SIGINT handler with sigaction(2) instead of signal(2) and
leaving SA_RESTART out of sa_flags stops the blocking splice(2) calls
from being restarted automatically, so the copy can be interrupted with
Ctrl+C even when standard input is a terminal.
---
 dd.c | 58 +-
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git dd.c dd.c
index cc05d40..4011045 100644
--- dd.c
+++ dd.c
@@ -8,7 +8,6 @@
  */
 #include 
 #include 
-#include <sys/select.h>
 #include 
 #include 
 #include 
@@ -38,6 +37,16 @@ struct dd_config {
 
 static int sigint = 0;
 
+static void
+sig_int(int unused_1, siginfo_t *unused_2, void *unused_3)
+{
+   (void) unused_1;
+   (void) unused_2;
+   (void) unused_3;
+   fprintf(stderr, "SIGINT! Aborting ...\n");
+   sigint = 1;
+}
+
 static int
 prepare_copy(struct dd_config *ddc, int *ifd, int *ofd)
 {
@@ -147,7 +156,6 @@ copy_splice(struct dd_config *ddc)
int ifd, ofd, p[2] = {-1, -1};
ssize_t r = 0;
size_t n = 0;
-   fd_set rfd, wfd;
 
if (prepare_copy(ddc, &ifd, &ofd) < 0)
return -1;
@@ -165,26 +173,19 @@ copy_splice(struct dd_config *ddc)
 #endif
n = ddc->bs;
for (;ddc->b_out != ddc->count && !sigint;) {
-   FD_ZERO(&rfd);
-   FD_ZERO(&wfd);
-   FD_SET(ifd, &rfd);
-   FD_SET(ofd, &wfd);
-   r = select(ifd > ofd ? ifd + 1 : ofd + 1, &rfd, &wfd, NULL, NULL);
if (r < 0)
break;
-   if (FD_ISSET(ifd, &rfd) == 1 && FD_ISSET(ofd, &wfd) == 1) {
-   if (n > ddc->count - ddc->b_out)
-   n = ddc->count - ddc->b_out;
-   r = splice(ifd, NULL, p[1], NULL, n, SPLICE_F_MORE);
-   if (r <= 0)
-   break;
-   ++ddc->rec_in;
-   r = splice(p[0], NULL, ofd, NULL, r, SPLICE_F_MORE);
-   if (r <= 0)
-   break;
-   ddc->b_out += r;
-   ++ddc->rec_out;
-   }
+   if (n > ddc->count - ddc->b_out)
+   n = ddc->count - ddc->b_out;
+   r = splice(ifd, NULL, p[1], NULL, n, SPLICE_F_MORE);
+   if (r <= 0)
+   break;
+   ++ddc->rec_in;
+   r = splice(p[0], NULL, ofd, NULL, r, SPLICE_F_MORE);
+   if (r <= 0)
+   break;
+   ddc->b_out += r;
+   ++ddc->rec_out;
}
close(ifd);
close(ofd);
@@ -227,14 +228,6 @@ print_stat(const struct dd_config *ddc)
 }
 
 static void
-sig_int(int unused)
-{
-   (void) unused;
-   fprintf(stderr, "SIGINT! Aborting ...\n");
-   sigint = 1;
-}
-
-static void
 usage(void)
 {
eprintf("usage: %s [-h] [if=infile] [of=outfile] [bs[=N]] [seek=N] "
@@ -248,6 +241,7 @@ main(int argc, char *argv[])
int i = 0;
char buf[1024];
struct dd_config config;
+   struct sigaction sa;
 
argv0 = argv[0];
memset(&config, 0, sizeof(config));
@@ -286,7 +280,13 @@ main(int argc, char *argv[])
}
 
signal(SIGPIPE, SIG_IGN);
-   signal(SIGINT, sig_int);
+
+   sa.sa_flags = SA_SIGINFO;
+   sigemptyset(&sa.sa_mask);
+   sa.sa_sigaction = sig_int;
+
+   if (sigaction(SIGINT, &sa, NULL) == -1)
+   weprintf("sigaction");
 
if (copy(&config) < 0)
weprintf("copy:");
-- 
2.11.0
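(For readers less familiar with SA_RESTART, here is a minimal standalone
sketch of the mechanism the patch relies on; nothing below is lifted from
dd.c and the names are illustrative only. Without SA_RESTART in sa_flags,
a blocking call such as read(2) or splice(2) returns -1 with errno set to
EINTR when SIGINT arrives, instead of being restarted transparently, so
the copy loop gets a chance to check its flag.)

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t interrupted;

static void
on_sigint(int sig)
{
        (void) sig;
        interrupted = 1;
}

int
main(void)
{
        struct sigaction sa;
        char buf[512];
        ssize_t r;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_sigint;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;        /* deliberately no SA_RESTART */
        if (sigaction(SIGINT, &sa, NULL) < 0)
                return 1;

        /* copy stdin to stdout until EOF, error or Ctrl+C */
        while (!interrupted && (r = read(0, buf, sizeof(buf))) != 0) {
                if (r < 0) {
                        if (errno == EINTR)
                                continue;       /* loop condition sees the flag */
                        break;
                }
                write(1, buf, r);       /* short writes ignored for brevity */
        }
        return 0;
}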






[hackers] [sbase] tar: Fix a few weprintf/eprintf format strings || Michael Forney

2017-09-09 Thread git
commit e6d3032131760725ab409a9c3c78fa81fff3d1e2
Author: Michael Forney 
AuthorDate: Sat Sep 9 17:34:43 2017 -0700
Commit: Michael Forney 
CommitDate: Sat Sep 9 17:34:43 2017 -0700

tar: Fix a few weprintf/eprintf format strings

Thanks to Jim Beveridge for spotting the incorrect utimensat error.

Turns out there are a few other instances of this in the vicinity.

diff --git a/tar.c b/tar.c
index 53a737c..a6ead2e 100644
--- a/tar.c
+++ b/tar.c
@@ -331,15 +331,15 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
times[0].tv_sec = times[1].tv_sec = mtime;
times[0].tv_nsec = times[1].tv_nsec = 0;
 if (!mflag && utimensat(AT_FDCWD, fname, times, AT_SYMLINK_NOFOLLOW) < 0)
-   weprintf("utimensat %s:\n", fname);
+   weprintf("utimensat %s:", fname);
if (h->type == SYMLINK) {
if (!getuid() && lchown(fname, uid, gid))
-   weprintf("lchown %s:\n", fname);
+   weprintf("lchown %s:", fname);
} else {
if (!getuid() && chown(fname, uid, gid))
-   weprintf("chown %s:\n", fname);
+   weprintf("chown %s:", fname);
if (chmod(fname, mode) < 0)
-   eprintf("fchmod %s:\n", fname);
+   eprintf("fchmod %s:", fname);
}
 
return 0;
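(For context, the convention these fixes rely on, stated here as my
understanding of sbase's util helpers rather than gospel: weprintf/eprintf
append strerror(errno) only when the format string ends with ':', so a
trailing "\n" after the ':' suppresses the errno text and leaves a
dangling colon. A small illustration, with the error value faked:)

#include <errno.h>
#include "util.h"       /* sbase's weprintf(); assumes argv0 is set as usual */

static void
demo(const char *fname)
{
        errno = EACCES; /* pretend utimensat() just failed */
        weprintf("utimensat %s:", fname);   /* "tar: utimensat foo: Permission denied" */
        weprintf("utimensat %s:\n", fname); /* "tar: utimensat foo:" -- errno text lost */
}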



Re: [hackers] [PATCH][sbase] tar: use bigger buffer size to increase performance

2017-09-09 Thread Michael Forney
Make sure to CC Jim in replies; he is not subscribed to the list.

On Sat, Sep 9, 2017 at 1:31 PM, Jim Beveridge  wrote:
> Several comments to clear up some points about my tar code:

Thanks, Jim. I still have a few questions.

I also have to insist that these issues are tackled one by one rather
than in one large commit.

> The CHUNK changes were not intended to optimize the general case, that was
> just a happy side effect. The changes were intended to address severe
> performance problems on platforms (like Fuchsia, at least at the moment)
> that don't do write-behind and/or read-ahead caching.

Yes, this needs to be addressed; however, I think we should do it with a
copy buffer size define in util.h, since libutil/concat.c should use a
larger buffer as well.
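Roughly something like this in util.h; the macro name and value below are
placeholders, not a proposal set in stone:

/* util.h: shared copy-buffer size for tar, concat, ... (name/value TBD) */
#define COPYBUFSIZ (64 * 1024)

tar.c and libutil/concat.c could then both size their buffers from the
same define instead of BLKSIZ/BUFSIZ.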

> My code supports short reads. I specifically had a test bed that was capable
> of reproducing short reads from a pipe and I spent quite a bit of time
> making that case work properly. Perhaps I'm reading too much into your
> comments, but I don't see the point of waiting on short write support
> to commit these changes supporting short reads.

Yes, but I think adding a utility function to deal with the case of
reading an entire amount (like plan9's readn or go's io.ReadFull)
would simplify this.
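Something along these lines, purely as a sketch (the name readall and the
exact return convention are assumptions mirroring writeall, not an agreed
interface):

#include <sys/types.h>
#include <unistd.h>

/* read exactly `count` bytes unless EOF or an error cuts it short;
 * returns the number of bytes read, or -1 on error */
ssize_t
readall(int fd, void *buf, size_t count)
{
        char *p = buf;
        size_t off = 0;
        ssize_t r;

        while (off < count) {
                r = read(fd, p + off, count - off);
                if (r < 0)
                        return -1;      /* error; errno set by read() */
                if (r == 0)
                        break;          /* EOF: caller sees a short count */
                off += r;
        }
        return off;
}

The extraction loop could then request a full chunk at a time and treat a
short return as the unexpected-EOF case.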

> Regarding your comment about, "If you tar a file within another directory,
> you end up with just one entry. This check means that the parent directory
> won't be created when trying to extract this file. Other tar implementations
> are able to extract such an archive." I completely agree, but I'm not sure
> what's driving your comment? My implementation supports this case.

Sorry, I should have tested and provided an example. You're right,
that case is supported by your patch. However, the following case is
not:

$ mkdir -p a/b
$ tar -cf a.tar a/b
$ rm -r a
$ tar -xf a.tar
tar: mkdir a/b: No such file or directory

> The
> previous implementation had issues because it never allowed the directory
> creation switch case to do its job, which caused loss of permissions on
> directory creation and unnecessary user-facing warnings in some cases.

Can you provide a concrete example of one of these cases? As far as I
understand it, if all the directories are present in the archive, they
will be created by the directory creation switch case with the
appropriate permissions. mkdirp should be harmless on a parent
directory that already exists since it never removes directories or
changes their mode.

Which warnings are you talking about? mkdirp should not warn if the
call to mkdir fails with EEXIST, and since we call it with the dirname
of fname, it should not be creating directories for the tar entry
itself.

I also wonder if this recent commit solves your problem, but even if
it does, I'm curious about the behavior you were observing:

https://git.suckless.org/sbase/commit/libutil/mkdirp.c?id=6ac5f01cc94b2a6f7b6406ddd151e7b4d8fb1d7d

> Finally, st should not be added into the if clause. That was a missing merge
> in my code. I fixed this in my pull request to be:
> if (r->depth && S_ISDIR(st->st_mode))

Okay, thanks for confirming.



Re: [hackers] [PATCH][sbase] tar: use bigger buffer size to increase performance

2017-09-09 Thread Michael Forney
On Sat, Sep 9, 2017 at 2:08 AM, Silvan Jegen  wrote:
> From: Jim Beveridge 
>
> The original code is by Jim Beveridge working on Fuchsia. I merged it
> with slight changes.
>
> Time to tar two 1GB files:
>
> Before patch:
>
> real    0m6.428s
> user    0m0.245s
> sys     0m4.881s
>
> real    0m6.454s
> user    0m0.239s
> sys     0m4.883s
>
> real    0m6.515s
> user    0m0.259s
> sys     0m4.839s
>
> After patch:
>
> real    0m4.755s
> user    0m0.026s
> sys     0m1.598s
>
> real    0m4.788s
> user    0m0.063s
> sys     0m1.578s
>
> real    0m4.822s
> user    0m0.007s
> sys     0m1.662s
>
> A similar speedup can be observed for untarring.
>
> In addition to the buffer size increase we change the code to only create
> directories for non-compliant tar files and we check for st to be NULL
> in the recursive copy function.

He also sent me a pull request on my github branch for oasis:
https://github.com/michaelforney/sbase/pull/2

I think we should work on fixing correctness of tar before trying to
optimize it. Currently it does not handle short reads or writes at all
(when working with pipes). I was thinking we should add a readall in
libutil analogous to writeall and then make use of that.

Regarding COPY_CHUNK_SIZE, it is probably a good idea to put that in
util.h (perhaps with a different name). concat has the same problem
with a small BUFSIZ (musl's is only 1024).

> ---
>  tar.c | 72 +++
>  1 file changed, 55 insertions(+), 17 deletions(-)
>
> diff --git a/tar.c b/tar.c
> index 53a737c..8cd1abe 100644
> --- a/tar.c
> +++ b/tar.c
> @@ -16,6 +16,8 @@
>  #include "util.h"
>
>  #define BLKSIZ 512
> +// COPY_CHUNK_SIZE must be a power of 2
> +#define COPY_CHUNK_SIZE 8192
>
>  enum Type {
> REG   = '0',
> @@ -236,10 +238,13 @@ archive(const char *path)
> ewrite(tarfd, b, BLKSIZ);
>
> if (fd != -1) {
> -   while ((l = eread(fd, b, BLKSIZ)) > 0) {
> -   if (l < BLKSIZ)
> -   memset(b + l, 0, BLKSIZ - l);
> -   ewrite(tarfd, b, BLKSIZ);
> +   char chunk[COPY_CHUNK_SIZE];
> +   while ((l = eread(fd, chunk, COPY_CHUNK_SIZE)) > 0) {
> +   // Ceiling to BLKSIZ boundary
> +   int ceilsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
> +   if (l < ceilsize)
> +   memset(chunk + l, 0, ceilsize - l);
> +   ewrite(tarfd, chunk, ceilsize);
> }
> close(fd);
> }
> @@ -250,7 +255,7 @@ archive(const char *path)
>  static int
>  unarchive(char *fname, ssize_t l, char b[BLKSIZ])
>  {
> -   char lname[101], *tmp, *p;
> +   char lname[101], *p;
> long mode, major, minor, type, mtime, uid, gid;
> struct header *h = (struct header *)b;
> int fd = -1;
> @@ -261,9 +266,13 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
> if (remove(fname) < 0 && errno != ENOENT)
> weprintf("remove %s:", fname);
>
> -   tmp = estrdup(fname);
> -   mkdirp(dirname(tmp), 0777, 0777);
> -   free(tmp);
> +   // tar files normally create the directory chain. This is a fallback
> +   // for noncompliant tar files.
> +   if (h->type != DIRECTORY) {
> +   char* tmp = estrdup(fname);
> +   mkdirp(dirname(tmp), 0777, 0777);
> +   free(tmp);
> +   }

If you tar a file within another directory, you end up with just one
entry. This check means that the parent directory won't be created
when trying to extract this file. Other tar implementations are able
to extract such an archive.

>
> switch (h->type) {
> case REG:
> @@ -319,9 +328,25 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
> eprintf("strtol %s: invalid number\n", h->gid);
>
> if (fd != -1) {
> -   for (; l > 0; l -= BLKSIZ)
> -   if (eread(tarfd, b, BLKSIZ) > 0)
> -   ewrite(fd, b, MIN(l, BLKSIZ));
> +   // Ceiling to BLKSIZ boundary
> +   int readsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
> +   char chunk[COPY_CHUNK_SIZE];
> +   int lastread = 0;
> +
> +   for (; readsize > 0; l -= lastread, readsize -= lastread) {
> +   int chunk_size = MIN(readsize, COPY_CHUNK_SIZE);
> +   // Short reads are legal, so don't expect to read
> +   // everything that was requested.
> +   lastread = eread(tarfd, chunk, chunk_size);
> +   if (lastread == 0) {
> +   close(fd);
> +   remove(fname);
> +   eprintf("unexpected end of file reading %s.\n",
> +   fname);
> +   }
> +
> +

Re: [hackers] [PATCH][sbase] tar: use bigger buffer size to increase performance

2017-09-09 Thread Hiltjo Posthuma
On Sat, Sep 09, 2017 at 11:08:42AM +0200, Silvan Jegen wrote:
> From: Jim Beveridge 
> 
> The original code is by Jim Beveridge working on Fuchsia. I merged it
> with slight changes.
> 

To be clear: is it under the sbase LICENSE?

> Time to tar two 1GB files:
> 
> Before patch:
> 
> real  0m6.428s
> user  0m0.245s
> sys   0m4.881s
> 
> real  0m6.454s
> user  0m0.239s
> sys   0m4.883s
> 
> real  0m6.515s
> user  0m0.259s
> sys   0m4.839s
> 
> After patch:
> 
> real  0m4.755s
> user  0m0.026s
> sys   0m1.598s
> 
> real  0m4.788s
> user  0m0.063s
> sys   0m1.578s
> 
> real  0m4.822s
> user  0m0.007s
> sys   0m1.662s
> 
> A similar speedup can be observed for untarring.
> 
> In addition to the buffer size increase we change the code to only create
> directories for non-compliant tar files and we check for st to be NULL
> in the recursive copy function.
> ---
>  tar.c | 72 +++
>  1 file changed, 55 insertions(+), 17 deletions(-)
> 
> diff --git a/tar.c b/tar.c
> index 53a737c..8cd1abe 100644
> --- a/tar.c
> +++ b/tar.c
> @@ -16,6 +16,8 @@
>  #include "util.h"
>  
>  #define BLKSIZ 512
> +// COPY_CHUNK_SIZE must be a power of 2
> +#define COPY_CHUNK_SIZE 8192
>  

Instead of COPY_CHUNK_SIZE it might be worthwhile to query the page size,
but I've not tested it.
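If we went that route, it would presumably look something like this (also
untested; the multiplier and fallback are arbitrary choices):

#include <unistd.h>

/* derive the copy-chunk size from the system page size */
size_t
copychunk(void)
{
        long ps = sysconf(_SC_PAGESIZE);

        if (ps <= 0)
                ps = 4096;      /* sysconf failed or gave nonsense; fall back */
        return (size_t)ps * 16; /* 64 KiB with the common 4 KiB pages */
}

One wrinkle is that the chunk buffer in the patch is a fixed-size stack
array, so it would have to become a VLA or be allocated once up front.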

>  enum Type {
>   REG   = '0',
> @@ -236,10 +238,13 @@ archive(const char *path)
>   ewrite(tarfd, b, BLKSIZ);
>  
>   if (fd != -1) {
> - while ((l = eread(fd, b, BLKSIZ)) > 0) {
> - if (l < BLKSIZ)
> - memset(b + l, 0, BLKSIZ - l);
> - ewrite(tarfd, b, BLKSIZ);
> + char chunk[COPY_CHUNK_SIZE];
> + while ((l = eread(fd, chunk, COPY_CHUNK_SIZE)) > 0) {
> + // Ceiling to BLKSIZ boundary
> + int ceilsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
> + if (l < ceilsize)
> + memset(chunk + l, 0, ceilsize - l);
> + ewrite(tarfd, chunk, ceilsize);
>   }
>   close(fd);
>   }
> @@ -250,7 +255,7 @@ archive(const char *path)
>  static int
>  unarchive(char *fname, ssize_t l, char b[BLKSIZ])
>  {
> - char lname[101], *tmp, *p;
> + char lname[101], *p;
>   long mode, major, minor, type, mtime, uid, gid;
>   struct header *h = (struct header *)b;
>   int fd = -1;
> @@ -261,9 +266,13 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
>   if (remove(fname) < 0 && errno != ENOENT)
>   weprintf("remove %s:", fname);
>  
> - tmp = estrdup(fname);
> - mkdirp(dirname(tmp), 0777, 0777);
> - free(tmp);
> + // tar files normally create the directory chain. This is a fallback
> + // for noncompliant tar files.
> + if (h->type != DIRECTORY) {
> + char* tmp = estrdup(fname);
> + mkdirp(dirname(tmp), 0777, 0777);
> + free(tmp);
> + }
>  
>   switch (h->type) {
>   case REG:
> @@ -319,9 +328,25 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
>   eprintf("strtol %s: invalid number\n", h->gid);
>  
>   if (fd != -1) {
> - for (; l > 0; l -= BLKSIZ)
> - if (eread(tarfd, b, BLKSIZ) > 0)
> - ewrite(fd, b, MIN(l, BLKSIZ));
> + // Ceiling to BLKSIZ boundary
> + int readsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
> + char chunk[COPY_CHUNK_SIZE];
> + int lastread = 0;
> +
> + for (; readsize > 0; l -= lastread, readsize -= lastread) {
> + int chunk_size = MIN(readsize, COPY_CHUNK_SIZE);
> + // Short reads are legal, so don't expect to read
> + // everything that was requested.
> + lastread = eread(tarfd, chunk, chunk_size);
> + if (lastread == 0) {
> + close(fd);
> + remove(fname);

Do all the tar tools remove the file in this case? It might be better to not
remove it.

> + eprintf("unexpected end of file reading %s.\n",
> + fname);
> + }
> +
> + ewrite(fd, chunk, MIN(l, lastread));
> + }
>   close(fd);
>   }
>  
> @@ -331,7 +356,7 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
>   times[0].tv_sec = times[1].tv_sec = mtime;
>   times[0].tv_nsec = times[1].tv_nsec = 0;
>   if (!mflag && utimensat(AT_FDCWD, fname, times, AT_SYMLINK_NOFOLLOW) < 0)
> - weprintf("utimensat %s:\n", fname);
> + weprintf("utimensat %s %d:\n", fname, errno);
>   if (h->type == SYMLINK) {
>   if (!getuid() && lchown(fname, uid, gid))
>   weprintf("lchown %s:\n", fname);
> @@ -349,10 +374,23 @@ static void
> 

[hackers] [PATCH][sbase] tar: use bigger buffer size to increase performance

2017-09-09 Thread Silvan Jegen
From: Jim Beveridge 

The original code is by Jim Beveridge working on Fuchsia. I merged it
with slight changes.

Time to tar two 1GB files:

Before patch:

real    0m6.428s
user    0m0.245s
sys     0m4.881s

real    0m6.454s
user    0m0.239s
sys     0m4.883s

real    0m6.515s
user    0m0.259s
sys     0m4.839s

After patch:

real    0m4.755s
user    0m0.026s
sys     0m1.598s

real    0m4.788s
user    0m0.063s
sys     0m1.578s

real    0m4.822s
user    0m0.007s
sys     0m1.662s

A similar speedup can be observed for untarring.

In addition to the buffer size increase we change the code to only create
directories for non-compliant tar files and we check for st to be NULL
in the recursive copy function.
---
 tar.c | 72 +++
 1 file changed, 55 insertions(+), 17 deletions(-)

diff --git a/tar.c b/tar.c
index 53a737c..8cd1abe 100644
--- a/tar.c
+++ b/tar.c
@@ -16,6 +16,8 @@
 #include "util.h"
 
 #define BLKSIZ 512
+// COPY_CHUNK_SIZE must be a power of 2
+#define COPY_CHUNK_SIZE 8192
 
 enum Type {
REG   = '0',
@@ -236,10 +238,13 @@ archive(const char *path)
ewrite(tarfd, b, BLKSIZ);
 
if (fd != -1) {
-   while ((l = eread(fd, b, BLKSIZ)) > 0) {
-   if (l < BLKSIZ)
-   memset(b + l, 0, BLKSIZ - l);
-   ewrite(tarfd, b, BLKSIZ);
+   char chunk[COPY_CHUNK_SIZE];
+   while ((l = eread(fd, chunk, COPY_CHUNK_SIZE)) > 0) {
+   // Ceiling to BLKSIZ boundary
+   int ceilsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
+   if (l < ceilsize)
+   memset(chunk + l, 0, ceilsize - l);
+   ewrite(tarfd, chunk, ceilsize);
}
close(fd);
}
@@ -250,7 +255,7 @@ archive(const char *path)
 static int
 unarchive(char *fname, ssize_t l, char b[BLKSIZ])
 {
-   char lname[101], *tmp, *p;
+   char lname[101], *p;
long mode, major, minor, type, mtime, uid, gid;
struct header *h = (struct header *)b;
int fd = -1;
@@ -261,9 +266,13 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
if (remove(fname) < 0 && errno != ENOENT)
weprintf("remove %s:", fname);
 
-   tmp = estrdup(fname);
-   mkdirp(dirname(tmp), 0777, 0777);
-   free(tmp);
+   // tar files normally create the directory chain. This is a fallback
+   // for noncompliant tar files.
+   if (h->type != DIRECTORY) {
+   char* tmp = estrdup(fname);
+   mkdirp(dirname(tmp), 0777, 0777);
+   free(tmp);
+   }
 
switch (h->type) {
case REG:
@@ -319,9 +328,25 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
eprintf("strtol %s: invalid number\n", h->gid);
 
if (fd != -1) {
-   for (; l > 0; l -= BLKSIZ)
-   if (eread(tarfd, b, BLKSIZ) > 0)
-   ewrite(fd, b, MIN(l, BLKSIZ));
+   // Ceiling to BLKSIZ boundary
+   int readsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
+   char chunk[COPY_CHUNK_SIZE];
+   int lastread = 0;
+
+   for (; readsize > 0; l -= lastread, readsize -= lastread) {
+   int chunk_size = MIN(readsize, COPY_CHUNK_SIZE);
+   // Short reads are legal, so don't expect to read
+   // everything that was requested.
+   lastread = eread(tarfd, chunk, chunk_size);
+   if (lastread == 0) {
+   close(fd);
+   remove(fname);
+   eprintf("unexpected end of file reading %s.\n",
+   fname);
+   }
+
+   ewrite(fd, chunk, MIN(l, lastread));
+   }
close(fd);
}
 
@@ -331,7 +356,7 @@ unarchive(char *fname, ssize_t l, char b[BLKSIZ])
times[0].tv_sec = times[1].tv_sec = mtime;
times[0].tv_nsec = times[1].tv_nsec = 0;
 if (!mflag && utimensat(AT_FDCWD, fname, times, AT_SYMLINK_NOFOLLOW) < 0)
-   weprintf("utimensat %s:\n", fname);
+   weprintf("utimensat %s %d:\n", fname, errno);
if (h->type == SYMLINK) {
if (!getuid() && lchown(fname, uid, gid))
weprintf("lchown %s:\n", fname);
@@ -349,10 +374,23 @@ static void
 skipblk(ssize_t l)
 {
char b[BLKSIZ];
-
-   for (; l > 0; l -= BLKSIZ)
-   if (!eread(tarfd, b, BLKSIZ))
-   break;
+   int lastread = 0;
+   // Ceiling to BLKSIZ boundary
+   int ceilsize = (l + (BLKSIZ-1)) & ~(BLKSIZ-1);
+
+   off_t offset = lseek(tarfd, ceilsize, SEEK_CUR);
+   if (offset >= ceilsize)
+   return;
+   if (errno