Except for two small nits, it looks good.

Note: because we use atomic_inc_not_zero(i_count), and iput can bring i_count back up, we can _potentially_ see a situation where open_by_handle_at first fails to open a handle and a later call succeeds on the same handle. But I don't think it's a big deal for us.
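To spell the scenario out (as I understand it; illustrative only):

/*
 * T1: iput() drops i_count to 0 and heads for eviction
 * T2: open_by_handle_at() -> cgroup_fh_to_dentry()
 *       -> cgroup_find_inode(take_ref = 1)
 *       atomic_inc_not_zero() sees i_count == 0, so the lookup
 *       (and thus the open) fails
 * T1: i_count gets bumped back up before eviction completes
 * T2: a retry of open_by_handle_at() on the same handle succeeds
 */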

On 7/28/20 2:59 PM, Andrey Zhadchenko wrote:
criu uses the fhandle from fdinfo to dump inotify objects. The cgroup super block
has no export operations, but .encode_fh and .fh_to_dentry are needed by the
inotify_fdinfo function and the open_by_handle_at syscall to correctly open
files located on cgroupfs by fhandle.
Add a hash table as storage for inodes with exported fhandles.
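For anyone reading along, this is roughly the userspace flow these export ops enable. A minimal sketch, not criu's actual code; the cgroupfs paths are made up for the example:

#define _GNU_SOURCE
#include <fcntl.h>	/* name_to_handle_at, open_by_handle_at, MAX_HANDLE_SZ */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mfd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* ends up in sb->s_export_op->encode_fh */
	if (name_to_handle_at(AT_FDCWD, "/sys/fs/cgroup/memory/test",
			      fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}

	/* ends up in sb->s_export_op->fh_to_dentry; needs CAP_DAC_READ_SEARCH */
	mfd = open("/sys/fs/cgroup/memory", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mfd, fh, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle_at");
	return fd < 0;
}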

v3: use inode->i_generation to protect against i_ino reuse; increase the fhandle
size to 2 * u32.
Add an option to take a reference on the inode in cgroup_find_inode, so no one
can delete a recently found inode.

https://jira.sw.ru/browse/PSBM-105889
Signed-off-by: Andrey Zhadchenko <andrey.zhadche...@virtuozzo.com>
---
  kernel/cgroup.c | 137 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
  1 file changed, 136 insertions(+), 1 deletion(-)

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9fdba79..267a5a4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -62,6 +62,8 @@
  #include <linux/kthread.h>
  #include <linux/ve.h>
  #include <linux/stacktrace.h>
+#include <linux/exportfs.h>
+#include <linux/time.h>
  #include <linux/atomic.h>
@@ -765,6 +767,7 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
        if (inode) {
                inode->i_ino = get_next_ino();
+               inode->i_generation = get_seconds();

We likely want to use prandom_u32() instead, see mainstream Linux commit 46c9a946d766 ("shmem: use monotonic time for i_generation"); they changed it for shmem to avoid the get_seconds() overflow.
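I.e., something like this (untested, just to illustrate the suggestion; prandom_u32() is declared in <linux/random.h>):

                inode->i_ino = get_next_ino();
                inode->i_generation = prandom_u32();	/* instead of get_seconds() */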

                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
@@ -1390,9 +1393,140 @@ out:
  }
  #endif
+/*
+ * Hash table for inodes that have exported fhandles.
+ * When we export an fhandle, we add its inode into the
+ * hash table so we can find it quickly.
+ */
+
+#define CGROUP_INODE_HASH_BITS 10
+static DEFINE_HASHTABLE(cgroup_inode_table, CGROUP_INODE_HASH_BITS);
+static DEFINE_SPINLOCK(cgroup_inode_table_lock);
+
+struct cg_inode_hitem {
+       struct inode *inode;
+       struct hlist_node hlist;
+};
+
+static inline unsigned long cgroup_inode_get_hash(unsigned int i_ino)
+{
+       return hash_32(i_ino, CGROUP_INODE_HASH_BITS);
+}
+
+static struct cg_inode_hitem *cgroup_find_inode(unsigned long fhandle[2],
+                                                       char take_ref)
+{
+       struct cg_inode_hitem *i;
+       struct hlist_head *head = cgroup_inode_table
+               + cgroup_inode_get_hash(fhandle[1]);
+       struct cg_inode_hitem *found = NULL;
+
+       spin_lock(&cgroup_inode_table_lock);
+       hlist_for_each_entry(i, head, hlist) {
+               if (i->inode->i_generation == fhandle[0] &&
+                   i->inode->i_ino == fhandle[1]) {
+                       /*
+                        * If we need to increase the refcount, we must be
+                        * aware of a possible deadlock. Another thread may
+                        * have started deleting this inode: iput->iput_final->
+                        * ->cgroup_delete_inode->cgroup_find_inode. If we
+                        * just call igrab, it will try to take i_lock and
+                        * that will deadlock, because the deleting thread
+                        * has already taken i_lock and is waiting on
+                        * cgroup_inode_table_lock to find the inode in the
+                        * hash table. If i_count is zero, someone is
+                        * deleting it -> skip.
+                        */
+                       if (take_ref)
+                               if (!atomic_inc_not_zero(&i->inode->i_count))
+                                       continue;

Shouldn't this be break instead of continue? There can't be another entry in the hash with the same i_ino and i_generation.
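I.e. (illustrative):

                       if (take_ref)
                               if (!atomic_inc_not_zero(&i->inode->i_count))
                                       break;  /* no other entry has this i_ino + i_generation */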

+                       found = i;
+                       break;
+               }
+       }
+       spin_unlock(&cgroup_inode_table_lock);
+
+       return found;
+}
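For the record, the deadlock scenario the comment above describes, as I read it:

/*
 * T1: iput(inode)
 *       -> iput_final()                called with inode->i_lock held
 *         -> drop_inode == cgroup_delete_inode()
 *           -> cgroup_find_inode()     spins on cgroup_inode_table_lock
 * T2: cgroup_find_inode(take_ref = 1)  holds cgroup_inode_table_lock
 *       -> igrab(inode)                would spin on inode->i_lock
 *
 * => ABBA deadlock; atomic_inc_not_zero() avoids touching i_lock.
 */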
+
+static struct dentry *cgroup_fh_to_dentry(struct super_block *sb,
+               struct fid *fid, int fh_len, int fh_type)
+{
+       struct cg_inode_hitem *item;
+       struct dentry *dentry = ERR_PTR(-ENOENT);
+       unsigned long fhandle[2] = {fid->raw[0], fid->raw[1]};
+
+       if (fh_len < 2)
+               return NULL;
+
+       item = cgroup_find_inode(fhandle, 1);
+       if (item) {
+               dentry = d_find_alias(item->inode);
+               iput(item->inode);
+       }
+       return dentry;
+}
+
+static int cgroup_encode_fh(struct inode *inode, __u32 *fh, int *len,
+                               struct inode *parent)
+{
+       struct cg_inode_hitem *item;
+       struct hlist_head *head = cgroup_inode_table
+               + cgroup_inode_get_hash(inode->i_ino);
+       unsigned long fhandle[2] = {inode->i_generation, inode->i_ino};
+
+       if (*len < 2) {
+               *len = 2;
+               return FILEID_INVALID;
+       }
+
+       if (!cgroup_find_inode(fhandle, 0)) {
+               item = kmalloc(sizeof(struct cg_inode_hitem),
+                       GFP_KERNEL);
+               /*
+                * encode_fh is expected to return 255 (FILEID_INVALID)
+                * in case of failure. We can't return -ENOMEM, so at
+                * least return FILEID_INVALID.
+                */
+               if (!item)
+                       return FILEID_INVALID;
+               item->inode = inode;
+
+               spin_lock(&cgroup_inode_table_lock);
+               hlist_add_head(&item->hlist, head);
+               spin_unlock(&cgroup_inode_table_lock);
+       }
+
+       fh[0] = fhandle[0];
+       fh[1] = fhandle[1];
+       *len = 2;
+       return 1;
+}
+
+static const struct export_operations cgroup_export_ops = {
+       .encode_fh      = cgroup_encode_fh,
+       .fh_to_dentry   = cgroup_fh_to_dentry,
+};
+
+static int cgroup_delete_inode(struct inode *inode)
+{
+       struct cg_inode_hitem *item = NULL;
+       unsigned long fhandle[2] = {inode->i_generation, inode->i_ino};
+
+       item = cgroup_find_inode(fhandle, 0);
+       if (item) {
+               spin_lock(&cgroup_inode_table_lock);
+               hlist_del(&item->hlist);
+               spin_unlock(&cgroup_inode_table_lock);
+
+               kfree(item);
+       }
+
+       return generic_delete_inode(inode);
+}
+
  static const struct super_operations cgroup_ops = {
        .statfs = simple_statfs,
-       .drop_inode = generic_delete_inode,
+       .drop_inode = cgroup_delete_inode,
        .show_options = cgroup_show_options,
  #ifdef CONFIG_VE
        .show_path = cgroup_show_path,
@@ -1539,6 +1673,7 @@ static int cgroup_set_super(struct super_block *sb, void *data)
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = CGROUP_SUPER_MAGIC;
        sb->s_op = &cgroup_ops;
+       sb->s_export_op = &cgroup_export_ops;
        return 0;
  }


--
Best regards, Tikhomirov Pavel
Software Developer, Virtuozzo.
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to