On 17/10/24 10:50, Alex Chen wrote:
> The subsystem's su_mutex must be held while accessing item->ci_parent;
> otherwise, a NULL pointer dereference of item->ci_parent can be
> triggered in the following situation:
> add node                     delete node
> sys_write
>  vfs_write
>   configfs_write_file
>    o2nm_node_store
>     o2nm_node_local_write
>                              do_rmdir
>                               vfs_rmdir
>                                configfs_rmdir
>                                 mutex_lock(&subsys->su_mutex);
>                                 unlink_obj
>                                  item->ci_group = NULL;
>                                  item->ci_parent = NULL;                      
>        to_o2nm_cluster_from_node
>         node->nd_item.ci_parent->ci_parent
>         BUG due to NULL pointer dereference of nd_item.ci_parent
> 
> Moreover, the o2nm_cluster should also be protected by the subsystem's su_mutex.
> 
> Signed-off-by: Alex Chen <alex.c...@huawei.com>
> Reviewed-by: Jun Piao <piao...@huawei.com>
Reviewed-by: Joseph Qi <jiangqi...@gmail.com>
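
For readers following the fix: every configfs ->store() handler now resolves
the parent cluster only after taking the subsystem's su_mutex, which is the
same mutex configfs_rmdir() holds while unlink_obj() clears ci_parent. Below
is a minimal sketch of that pattern, condensed from the hunks in this patch.
The helpers o2nm_lock_subsystem()/o2nm_unlock_subsystem() and
to_o2nm_cluster_from_node() are the ones added/changed by the patch;
example_store() itself is only an illustration for this reply, not code in
the tree, and it assumes the declarations already present in
fs/ocfs2/cluster/nodemanager.c.

static ssize_t example_store(struct config_item *item, const char *page,
                             size_t count)
{
        struct o2nm_node *node = to_o2nm_node(item);
        struct o2nm_cluster *cluster;
        ssize_t ret;

        /* Serialize against configfs_rmdir(): unlink_obj() clears
         * ci_parent only while holding the same su_mutex. */
        o2nm_lock_subsystem();

        cluster = to_o2nm_cluster_from_node(node);
        if (!cluster) {
                /* The node has already been unlinked; ci_parent is NULL. */
                o2nm_unlock_subsystem();
                return -EINVAL;
        }

        /* ... update cluster state here, taking cluster->cl_nodes_lock
         * just as the individual handlers in the patch do ... */
        ret = count;

        o2nm_unlock_subsystem();
        return ret;
}

Holding su_mutex across the whole update, not just the ci_parent lookup, is
what the "Moreover" sentence in the commit message refers to: the cluster the
handler resolved stays valid until after the write under cl_nodes_lock, which
is why each handler only unlocks the subsystem at the end.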

> 
> ---
>  fs/ocfs2/cluster/nodemanager.c | 63 ++++++++++++++++++++++++++++++++++++------
>  1 file changed, 55 insertions(+), 8 deletions(-)
> 
> diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
> index b17d180..c204ac9b 100644
> --- a/fs/ocfs2/cluster/nodemanager.c
> +++ b/fs/ocfs2/cluster/nodemanager.c
> @@ -40,6 +40,9 @@
>               "panic",        /* O2NM_FENCE_PANIC */
>  };
> 
> +static inline void o2nm_lock_subsystem(void);
> +static inline void o2nm_unlock_subsystem(void);
> +
>  struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
>  {
>       struct o2nm_node *node = NULL;
> @@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
>  {
>       /* through the first node_set .parent
>        * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
> -     return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
> +     if (node->nd_item.ci_parent)
> +             return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
> +     else
> +             return NULL;
>  }
> 
>  enum {
> @@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
>                                  size_t count)
>  {
>       struct o2nm_node *node = to_o2nm_node(item);
> -     struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
> +     struct o2nm_cluster *cluster;
>       unsigned long tmp;
>       char *p = (char *)page;
>       int ret = 0;
> @@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
>           !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
>               return -EINVAL; /* XXX */
> 
> +     o2nm_lock_subsystem();
> +     cluster = to_o2nm_cluster_from_node(node);
> +     if (!cluster) {
> +             o2nm_unlock_subsystem();
> +             return -EINVAL;
> +     }
> +
>       write_lock(&cluster->cl_nodes_lock);
>       if (cluster->cl_nodes[tmp])
>               ret = -EEXIST;
> @@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
>               set_bit(tmp, cluster->cl_nodes_bitmap);
>       }
>       write_unlock(&cluster->cl_nodes_lock);
> +     o2nm_unlock_subsystem();
> +
>       if (ret)
>               return ret;
> 
> @@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
>                                           size_t count)
>  {
>       struct o2nm_node *node = to_o2nm_node(item);
> -     struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
> +     struct o2nm_cluster *cluster;
>       int ret, i;
>       struct rb_node **p, *parent;
>       unsigned int octets[4];
> @@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
>               be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
>       }
> 
> +     o2nm_lock_subsystem();
> +     cluster = to_o2nm_cluster_from_node(node);
> +     if (!cluster) {
> +             o2nm_unlock_subsystem();
> +             return -EINVAL;
> +     }
> +
>       ret = 0;
>       write_lock(&cluster->cl_nodes_lock);
>       if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
> @@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
>               rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
>       }
>       write_unlock(&cluster->cl_nodes_lock);
> +     o2nm_unlock_subsystem();
> +
>       if (ret)
>               return ret;
> 
> @@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
>                                    size_t count)
>  {
>       struct o2nm_node *node = to_o2nm_node(item);
> -     struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
> +     struct o2nm_cluster *cluster;
>       unsigned long tmp;
>       char *p = (char *)page;
>       ssize_t ret;
> @@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
>           !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
>               return -EINVAL; /* XXX */
> 
> +     o2nm_lock_subsystem();
> +     cluster = to_o2nm_cluster_from_node(node);
> +     if (!cluster) {
> +             ret = -EINVAL;
> +             goto out;
> +     }
> +
>       /* the only failure case is trying to set a new local node
>        * when a different one is already set */
>       if (tmp && tmp == cluster->cl_has_local &&
> -         cluster->cl_local_node != node->nd_num)
> -             return -EBUSY;
> +         cluster->cl_local_node != node->nd_num) {
> +             ret = -EBUSY;
> +             goto out;
> +     }
> 
>       /* bring up the rx thread if we're setting the new local node. */
>       if (tmp && !cluster->cl_has_local) {
>               ret = o2net_start_listening(node);
>               if (ret)
> -                     return ret;
> +                     goto out;
>       }
> 
>       if (!tmp && cluster->cl_has_local &&
> @@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
>               cluster->cl_local_node = node->nd_num;
>       }
> 
> -     return count;
> +     ret = count;
> +
> +out:
> +     o2nm_unlock_subsystem();
> +     return ret;
>  }
> 
>  CONFIGFS_ATTR(o2nm_node_, num);
> @@ -738,6 +775,16 @@ static void o2nm_cluster_group_drop_item(struct config_group *group, struct conf
>       },
>  };
> 
> +static inline void o2nm_lock_subsystem(void)
> +{
> +     mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
> +}
> +
> +static inline void o2nm_unlock_subsystem(void)
> +{
> +     mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
> +}
> +
>  int o2nm_depend_item(struct config_item *item)
>  {
>       return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
> 
