ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent

The subsystem.su_mutex must be held while accessing item->ci_parent;
otherwise, a NULL pointer dereference on item->ci_parent is triggered in
the following situation:

add node                     delete node
sys_write
 vfs_write
  configfs_write_file
   o2nm_node_store
    o2nm_node_local_write
                             do_rmdir
                              vfs_rmdir
                               configfs_rmdir
                                mutex_lock(&subsys->su_mutex);
                                unlink_obj
                                 item->ci_group = NULL;
                                 item->ci_parent = NULL;
     to_o2nm_cluster_from_node
      node->nd_item.ci_parent->ci_parent
      BUG due to NULL pointer dereference on nd_item.ci_parent

Moreover, the o2nm_cluster should also be protected by the
subsystem.su_mutex.
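
The fix, condensed from the diff below: each node attribute ->store()
handler now takes the subsystem mutex before resolving the cluster from
the config_item parent, and bails out if the item has already been
unlinked by a concurrent rmdir. Roughly (o2nm_lock_subsystem() and
o2nm_unlock_subsystem() are the small helpers added by the patch that
wrap o2nm_cluster_group.cs_subsys.su_mutex):

	o2nm_lock_subsystem();
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		/* nd_item.ci_parent was cleared by configfs_rmdir() */
		o2nm_unlock_subsystem();
		return -EINVAL;
	}
	/* ... modify the cluster while su_mutex is still held ... */
	o2nm_unlock_subsystem();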

[alex.chen@huawei.com: v2]
  Link: http://lkml.kernel.org/r/59EEAA69.9080703@huawei.com
Link: http://lkml.kernel.org/r/59E9B36A.10700@huawei.com
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Reviewed-by: Jun Piao <piaojun@huawei.com>
Reviewed-by: Joseph Qi <jiangqi903@gmail.com>
Cc: Mark Fasheh <mfasheh@versity.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit 853bc26a7e (parent 3e4c56d41e)
Author: alex chen, 2017-11-15 17:31:48 -08:00
Committed by: Linus Torvalds

fs/ocfs2/cluster/nodemanager.c

@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
 		"panic",	/* O2NM_FENCE_PANIC */
 };
 
+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
 struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
 {
 	struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
 {
 	/* through the first node_set .parent
 	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
-	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	if (node->nd_item.ci_parent)
+		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	else
+		return NULL;
 }
 
 enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 				   size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	write_lock(&cluster->cl_nodes_lock);
 	if (cluster->cl_nodes[tmp])
 		ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 		set_bit(tmp, cluster->cl_nodes_bitmap);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
 
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 					    size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	int ret, i;
 	struct rb_node **p, *parent;
 	unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
 	}
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	ret = 0;
 	write_lock(&cluster->cl_nodes_lock);
 	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
 
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 				     size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* the only failure case is trying to set a new local node
 	 * when a different one is already set */
 	if (tmp && tmp == cluster->cl_has_local &&
-	    cluster->cl_local_node != node->nd_num)
-		return -EBUSY;
+	    cluster->cl_local_node != node->nd_num) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	/* bring up the rx thread if we're setting the new local node. */
 	if (tmp && !cluster->cl_has_local) {
 		ret = o2net_start_listening(node);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 		cluster->cl_local_node = node->nd_num;
 	}
 
-	return count;
+	ret = count;
+
+out:
+	o2nm_unlock_subsystem();
+	return ret;
 }
 
 CONFIGFS_ATTR(o2nm_node_, num);
@@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
 	},
 };
 
+static inline void o2nm_lock_subsystem(void)
+{
+	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
 int o2nm_depend_item(struct config_item *item)
 {
 	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);