On Thu, Sep 11, 2025 at 12:20:04PM +0300, Vladimir Sementsov-Ogievskiy wrote:
> We want to switch from qemu_socket_set_block() to the newer
> qemu_set_blocking(), which returns the status of the operation,
> so that we can handle errors.
>
> Still, we want to keep the qio_channel_socket_readv() interface
> clean: currently it sets @fds and @nfds only on success.
>
> So, in case of error, we should close all incoming fds and not
> touch the user's @fds and @nfds.
>
> Let's add separate functions qio_channel_handle_fds() and
> qio_channel_cleanup_fds() to achieve this.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]>
> ---
> io/channel-socket.c | 73 +++++++++++++++++++++++++++++++++++----------
> 1 file changed, 57 insertions(+), 16 deletions(-)
>
> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index f7e3cb9742..afae97b2ef 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -464,8 +464,7 @@ static void qio_channel_socket_finalize(Object *obj)
>
> #ifndef WIN32
> static void qio_channel_socket_copy_fds(struct msghdr *msg,
> - int **fds, size_t *nfds,
> - bool preserve_blocking)
> + int **fds, size_t *nfds)
> {
> struct cmsghdr *cmsg;
>
> @@ -473,7 +472,7 @@ static void qio_channel_socket_copy_fds(struct msghdr *msg,
> *fds = NULL;
>
> for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
> - int fd_size, i;
> + int fd_size;
> int gotfds;
>
> if (cmsg->cmsg_len < CMSG_LEN(sizeof(int)) ||
> @@ -491,24 +490,54 @@ static void qio_channel_socket_copy_fds(struct msghdr *msg,
> gotfds = fd_size / sizeof(int);
> *fds = g_renew(int, *fds, *nfds + gotfds);
> memcpy(*fds + *nfds, CMSG_DATA(cmsg), fd_size);
> + *nfds += gotfds;
> + }
> +}
>
> - for (i = 0; i < gotfds; i++) {
> - int fd = (*fds)[*nfds + i];
> - if (fd < 0) {
> - continue;
> - }
> +static bool qio_channel_handle_fds(int *fds, size_t nfds,
> + bool preserve_blocking, Error **errp)
> +{
> + int *end = fds + nfds, *fd;
> +
> +#ifdef MSG_CMSG_CLOEXEC
> + if (preserve_blocking) {
> + /* Nothing to do */
> + return true;
> + }
> +#endif
>
> - if (!preserve_blocking) {
> - /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */
> - qemu_socket_set_block(fd);
> + for (fd = fds; fd != end; fd++) {
> + if (*fd < 0) {
> + continue;
> + }
> +
> + if (!preserve_blocking) {
> + /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */
> + if (!qemu_set_blocking(*fd, true, errp)) {
> + return false;
> }
> + }
>
> #ifndef MSG_CMSG_CLOEXEC
> - qemu_set_cloexec(fd);
> + qemu_set_cloexec(*fd);
> #endif
> + }
> +
> + return true;
> +}
> +
> +static void qio_channel_cleanup_fds(int *fds, size_t nfds)
Suggest we change this to:

    ... qio_channel_cleanup_fds(int **fds, size_t *nfds)

(full sketch below)
> +{
> + int *end = fds + nfds, *fd;
> +
> + for (fd = fds; fd != end; fd++) {
I can't help feeling this would be clearer as:

    for (size_t i = 0; i < nfds; i++) {
> + if (*fd < 0) {
> + continue;
> }
> - *nfds += gotfds;
> + close(*fd);
> }
> +
> + g_free(fds);
Then here we can use:

    g_clear_pointer(fds, g_free);
    *nfds = 0;
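
Putting those three suggestions together, the whole helper would look
something like this (an untested sketch, not compiled):

static void qio_channel_cleanup_fds(int **fds, size_t *nfds)
{
    /* Close any fds we received ... */
    for (size_t i = 0; i < *nfds; i++) {
        if ((*fds)[i] < 0) {
            continue;
        }
        close((*fds)[i]);
    }

    /* ... then free the array and reset the caller's pointers */
    g_clear_pointer(fds, g_free);
    *nfds = 0;
}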
> }
>
>
> @@ -559,9 +588,21 @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc,
> }
>
> if (fds && nfds) {
> - qio_channel_socket_copy_fds(
> - &msg, fds, nfds,
> - flags & QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING);
> + int *local_fds;
> + size_t local_nfds;
> + bool preserve_blocking =
> + flags & QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING;
> +
> + qio_channel_socket_copy_fds(&msg, &local_fds, &local_nfds);
> +
> + if (!qio_channel_handle_fds(local_fds, local_nfds,
> + preserve_blocking, errp)) {
> + qio_channel_cleanup_fds(local_fds, local_nfds);
> + return -1;
> + }
> +
> + *fds = local_fds;
> + *nfds = local_nfds;
We could eliminate 'local_fds' / 'local_nfds' here and use 'fds' and
'nfds' directly, once we make qio_channel_cleanup_fds() responsible
for clearing the pointers it receives.
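
i.e. something along these lines (an untested sketch; note that on
error the caller's @fds/@nfds then end up as NULL/0 rather than
untouched, which is still a safe "no fds returned" state):

    if (fds && nfds) {
        bool preserve_blocking =
            flags & QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING;

        /* Write straight into the caller's pointers ... */
        qio_channel_socket_copy_fds(&msg, fds, nfds);

        if (!qio_channel_handle_fds(*fds, *nfds,
                                    preserve_blocking, errp)) {
            /* ... and let cleanup close the fds and clear them again */
            qio_channel_cleanup_fds(fds, nfds);
            return -1;
        }
    }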
> }
>
> return ret;
> --
> 2.48.1
>
With regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|