On Wed, Nov 07, 2018 at 10:08:05PM +0400, Marc-André Lureau wrote: > Hi > > On Fri, Oct 19, 2018 at 5:41 PM Daniel P. Berrangé <berra...@redhat.com> > wrote: > > > > The inotify userspace API for reading events is quite horrible, so it is > > useful to wrap it in a more friendly API to avoid duplicating code > > across many users in QEMU. Wrapping it also allows introduction of a > > platform portability layer, so that we can add impls for non-Linux based > > equivalents in future. > > > > Signed-off-by: Daniel P. Berrangé <berra...@redhat.com> > > ---
> > +struct QFileMonitor { > > + QemuMutex lock; > > + int fd; > > + > > + GHashTable *dirs; /* dirname => QFileMonitorDir */ > > + GHashTable *idmap; /* inotify ID => dirname */ > > +}; > > + > > + > > +typedef struct { > > + int id; /* watch ID */ > > + char *filename; /* optional filter */ > > + QFileMonitorHandler cb; > > + void *opaque; > > +} QFileMonitorWatch; > > + > > + > > +typedef struct { > > + char *path; > > + int id; /* inotify ID */ > > + int nextid; /* watch ID counter */ > > + gsize nwatches; > > + QFileMonitorWatch *watches; > > +} QFileMonitorDir; > > + > > + > > +#ifdef CONFIG_INOTIFY1 > > +#include <sys/inotify.h> > > + > > +static void qemu_file_monitor_watch(void *arg) > > +{ > > + QFileMonitor *mon = arg; > > + char buf[4096] > > + __attribute__ ((aligned(__alignof__(struct inotify_event)))); > > + int used = 0; > > + int len = read(mon->fd, buf, sizeof(buf)); > > + > > + qemu_mutex_lock(&mon->lock); > > I suppose the lock should guard from mon->fd above, or there might be > a race when modifying/removing a watch from a different thread. The mutex is only there to protect the "dirs" and "idmap" hash tables. The "fd" has enough safety - the kernel is fine with you reading from the fd, while another thread is adding/removing files on the inotify handle. Since the QFileMonitor object is a singleton that can never be free'd we'll never race with closing 'fd' either. > > > + > > + if (len < 0) { > > + if (errno != EAGAIN) { > > + error_report("Failure monitoring inotify FD, disabling > > events"); > > strerror(errno) could be useful Yep. > > +static void > > +qemu_file_monitor_dir_free(void *data) > > +{ > > + QFileMonitorDir *dir = data; > > + > > + g_free(dir->watches); > > for sake, I would add > assert(dir->nwatches == 0) Yep. 
> > +#ifdef CONFIG_INOTIFY1 > > +int > > +qemu_file_monitor_add_watch(QFileMonitor *mon, > > + const char *dirpath, > > + const char *filename, > > + QFileMonitorHandler cb, > > + void *opaque, > > + Error **errp) > > +{ > > + QFileMonitorDir *dir; > > + int ret = -1; > > + > > + qemu_mutex_lock(&mon->lock); > > + dir = g_hash_table_lookup(mon->dirs, dirpath); > > + if (!dir) { > > + int rv = inotify_add_watch(mon->fd, dirpath, > > + IN_CREATE | IN_DELETE | IN_MODIFY | > > + IN_MOVED_TO | IN_MOVED_FROM); > > + > > + if (rv < 0) { > > + error_setg_errno(errp, errno, "Unable to watch '%s'", dirpath); > > + goto cleanup; > > + } > > + > > + trace_qemu_file_monitor_enable_watch(mon, dirpath, rv); > > + > > + dir = g_new0(QFileMonitorDir, 1); > > + dir->path = g_strdup(dirpath); > > + dir->id = rv; > > + > > + g_hash_table_insert(mon->dirs, dir->path, dir); > > + g_hash_table_insert(mon->idmap, GINT_TO_POINTER(rv), dir); > > + > > + if (g_hash_table_size(mon->dirs) == 1) { > > + qemu_set_fd_handler(mon->fd, qemu_file_monitor_watch, NULL, > > mon); > > + } > > + } > > + > > + dir->watches = g_renew(QFileMonitorWatch, dir->watches, dir->nwatches > > + 1); > > GArray could eventually make handling of watches a bit simpler > (counting, resizing, removing etc) ok, i'll have a look at that API. > > > + > > + dir->watches[dir->nwatches].id = ++dir->nextid; > > + dir->watches[dir->nwatches].filename = filename ? g_strdup(filename) : > > NULL; > > g_strdup(NULL) returns NULL already Ah, I forget that. > > > + dir->watches[dir->nwatches].cb = cb; > > + dir->watches[dir->nwatches].opaque = opaque; > > + dir->nwatches++; > > + > > + trace_qemu_file_monitor_add_watch(mon, dirpath, > > + filename ? 
filename : "<none>", > > + cb, opaque, > > + dir->watches[dir->nwatches - 1].id); > > + > > + ret = 0; > > + > > + cleanup: > > + qemu_mutex_unlock(&mon->lock); > > + return ret; > > +} > > + > > + > > +void qemu_file_monitor_remove_watch(QFileMonitor *mon, > > + const char *dirpath, > > + int id) > > +{ > > + QFileMonitorDir *dir; > > + gsize i; > > + > > + qemu_mutex_lock(&mon->lock); > > + > > + trace_qemu_file_monitor_remove_watch(mon, dirpath, id); > > + > > + dir = g_hash_table_lookup(mon->dirs, dirpath); > > + if (!dir) { > > + goto cleanup; > > + } > > + > > + for (i = 0; i < dir->nwatches; i++) { > > + if (dir->watches[i].id == id) { > > + if (i < (dir->nwatches - 1)) { > > + memmove(dir->watches + i, > > + dir->watches + i + 1, > > + sizeof(QFileMonitorWatch) * > > + (dir->nwatches - (i + 1))); > > + dir->watches = g_renew(QFileMonitorWatch, dir->watches, > > + dir->nwatches - 1); > > + dir->nwatches--; > > + } > > + break; > > + } > > + } > > + > > + if (dir->nwatches == 0) { > > + inotify_rm_watch(mon->fd, dir->id); > > + trace_qemu_file_monitor_disable_watch(mon, dir->path, dir->id); > > + > > + g_hash_table_remove(mon->idmap, GINT_TO_POINTER(dir->id)); > > + g_hash_table_remove(mon->dirs, dir->path); > > + } > > + > > + cleanup: > > + qemu_mutex_unlock(&mon->lock); > > +} > > + > > +#else > > +int > > +qemu_file_monitor_add_watch(QFileMonitor *mon, > > + const char *dirpath, > > + const char *filename, > > + QFileMonitorHandler cb, > > + void *opaque, > > + Error **errp) > > +{ > > + error_setg(errp, "File monitoring not available on this platform"); > > + return -1; > > +} > > + > > +void qemu_file_monitor_remove_watch(QFileMonitor *mon, > > + const char *dirpath, > > + int id) > > +{ > > +} > > Wouldn't it be cleaner with stubs/ ? I guess we could do it that way. > > Looks good, but would be even better with tests :) I'll see what I can do about that. 
Regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|