The commit is pushed to "branch-rh10-6.12.0-55.13.1.2.x.vz10-ovz" and will
appear at [email protected]:openvz/vzkernel.git
after rh10-6.12.0-55.13.1.2.21.vz10
------>
commit db66fd709f4046da23acfb14a6f7d9de5cb86d5f
Author: Pavel Tikhomirov <[email protected]>
Date: Mon Nov 24 19:20:40 2025 +0800
ve: Rename ve_ns to ve_nsproxy
Otherwise one might think that ve_ns stands for "VE namespace". In the
next patches I want to add a VE namespace, so it is better to separate
these names early.
https://virtuozzo.atlassian.net/browse/VSTOR-118289
Signed-off-by: Pavel Tikhomirov <[email protected]>
Feature: ve: ve generic structures
======
Patchset description:
ve: Add VE namespace
The main ideas behind the VE namespace are explained in "ve: Introduce
VE namespace".
---
drivers/connector/cn_proc.c | 24 +++++------
drivers/connector/connector.c | 8 ++--
fs/nfsd/nfs4recover.c | 2 +-
include/linux/ve.h | 8 ++--
kernel/cgroup/cgroup-v1.c | 4 +-
kernel/cgroup/cgroup.c | 20 ++++-----
kernel/ve/ve.c | 98 +++++++++++++++++++++----------------------
kernel/ve/vzevent.c | 2 +-
net/core/netprio_cgroup.c | 24 +++++------
9 files changed, 95 insertions(+), 95 deletions(-)
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 5487f1958c9fd..eb280258b5b6b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -193,7 +193,7 @@ static bool fill_fork_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long unused)
{
struct task_struct *parent;
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
rcu_read_lock();
parent = rcu_dereference(task->real_parent);
@@ -213,7 +213,7 @@ void proc_fork_connector(struct task_struct *task)
static bool fill_exec_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long unused)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
ev->event_data.exec.process_pid = task_pid_nr_ns(task, pid_ns);
ev->event_data.exec.process_tgid = task_tgid_nr_ns(task, pid_ns);
@@ -229,7 +229,7 @@ static bool fill_id_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long which_id)
{
const struct cred *cred;
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
struct user_namespace *user_ns = ve->init_cred->user_ns;
ev->event_data.id.process_pid = task_pid_nr_ns(task, pid_ns);
@@ -258,7 +258,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
static bool fill_sid_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long unused)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
ev->event_data.sid.process_pid = task_pid_nr_ns(task, pid_ns);
ev->event_data.sid.process_tgid = task_tgid_nr_ns(task, pid_ns);
@@ -273,7 +273,7 @@ void proc_sid_connector(struct task_struct *task)
static bool fill_ptrace_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long ptrace_id)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
ev->event_data.ptrace.process_pid = task_pid_nr_ns(task, pid_ns);
ev->event_data.ptrace.process_tgid = task_tgid_nr_ns(task, pid_ns);
@@ -297,7 +297,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
static bool fill_comm_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long unused)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
ev->event_data.comm.process_pid = task_pid_nr_ns(task, pid_ns);
ev->event_data.comm.process_tgid = task_tgid_nr_ns(task, pid_ns);
@@ -313,7 +313,7 @@ void proc_comm_connector(struct task_struct *task)
static bool fill_coredump_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long unused)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
struct task_struct *parent;
ev->event_data.coredump.process_pid =
@@ -341,7 +341,7 @@ void proc_coredump_connector(struct task_struct *task)
static bool fill_exit_event(struct proc_event *ev, struct ve_struct *ve,
struct task_struct *task, long cookie_pids)
{
- struct pid_namespace *pid_ns = ve->ve_ns->pid_ns_for_children;
+ struct pid_namespace *pid_ns = ve->ve_nsproxy->pid_ns_for_children;
struct task_struct *parent;
struct pids *pids = (struct pids *)cookie_pids;
@@ -409,7 +409,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
{
enum proc_cn_mcast_op mc_op = 0, prev_mc_op = 0;
struct ve_struct *ve = get_exec_env();
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
struct proc_input *pinput = NULL;
enum proc_cn_event ev_type = 0;
int err = 0, initial = 0;
@@ -421,9 +421,9 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
* other namespaces.
*/
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (!current_user_ns_initial() || !ve_ns ||
- (task_active_pid_ns(current) != ve_ns->pid_ns_for_children)) {
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (!current_user_ns_initial() || !ve_nsproxy ||
+ (task_active_pid_ns(current) != ve_nsproxy->pid_ns_for_children)) {
rcu_read_unlock();
return;
}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index adbfe34330f5e..47f7fb284540b 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -315,10 +315,10 @@ static int cn_init_ve(void *data)
dev = &ve->cn->cdev;
/*
- * This is a hook, hooks are called under a single lock, so ve_ns will
+ * This is a hook, hooks are called under a single lock, so ve_nsproxy will
* not disappear, so rcu_read_lock()/unlock is not needed here.
*/
- net = rcu_dereference_check(ve->ve_ns, 1)->net_ns;
+ net = rcu_dereference_check(ve->ve_nsproxy, 1)->net_ns;
err = -EIO;
dev->nls = netlink_kernel_create(net, NETLINK_CONNECTOR, &cfg);
@@ -369,11 +369,11 @@ static void cn_fini_ve(void *data)
cn_proc_fini_ve(ve);
/*
- * This is a hook called on ve stop, ve->ve_ns will be destroyed
+ * This is a hook called on ve stop, ve->ve_nsproxy will be destroyed
* later in the same thread, parallel ve stop is impossible,
* so rcu_read_lock()/unlock is not needed here.
*/
- net = rcu_dereference_check(ve->ve_ns, 1)->net_ns;
+ net = rcu_dereference_check(ve->ve_nsproxy, 1)->net_ns;
remove_proc_entry("connector", net->proc_net);
cn_queue_free_dev(dev->cbdev);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 0976f6c12493d..29f5b8eaa863a 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1887,7 +1887,7 @@ nfsd4_umh_cltrack_init(struct net *net)
struct net *ve_net;
rcu_read_lock();
- ve_net = rcu_dereference(get_exec_env()->ve_ns)->net_ns;
+ ve_net = rcu_dereference(get_exec_env()->ve_nsproxy)->net_ns;
if (!net_eq(net, ve_net)) {
rcu_read_unlock();
diff --git a/include/linux/ve.h b/include/linux/ve.h
index 37562dff25aab..24d06ed21e749 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -51,7 +51,7 @@ struct ve_struct {
/* per VE CPU stats*/
u64 start_jiffies; /* Deprecated */
- struct nsproxy __rcu *ve_ns;
+ struct nsproxy __rcu *ve_nsproxy;
struct cred *init_cred;
/* see vzcalluser.h for VE_FEATURE_XXX definitions */
@@ -193,12 +193,12 @@ extern struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int sub
static inline struct time_namespace *ve_get_time_ns(struct ve_struct *ve)
{
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
struct time_namespace *time_ns;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- time_ns = ve_ns ? get_time_ns(ve_ns->time_ns) : NULL;
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ time_ns = ve_nsproxy ? get_time_ns(ve_nsproxy->time_ns) : NULL;
rcu_read_unlock();
return time_ns;
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 36b9eced835cd..e4ef76e4bde6e 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -866,11 +866,11 @@ void cgroup1_release_agent(struct work_struct *work)
* Release agent work can be queued only to running ve (see
* ve_add_to_release_list) and ve waits for all queued works to finish
* (see ve_workqueue_stop) before stopping, so we can safely access
- * ve_ns->cgroup_ns here without rcu_read_lock - it won't disappear
+ * ve_nsproxy->cgroup_ns here without rcu_read_lock - it won't disappear
* under us.
*/
struct cgroup_namespace *ve_cgroup_ns =
- rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns;
+ rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns;
char *pathbuf, *agentbuf;
char *argv[3], *envp[3];
unsigned long flags;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 561fab2179d24..5893d8f4d010a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2209,11 +2209,11 @@ int cgroup_mark_ve_roots(struct ve_struct *ve)
struct cgroup *cgrp;
/*
- * It's safe to use ve->ve_ns->cgroup_ns->root_cset here without extra
+ * It's safe to use ve->ve_nsproxy->cgroup_ns->root_cset here without extra
* locking as we do it from container init at container start after
* ve_grab_context and only container init can tear those down.
*/
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
BUG_ON(!cset);
spin_lock_irq(&css_set_lock);
@@ -2246,11 +2246,11 @@ void cgroup_unmark_ve_roots(struct ve_struct *ve)
struct cgroup *cgrp;
/*
- * It's safe to use ve->ve_ns->cgroup_ns->root_cset here without extra
+ * It's safe to use ve->ve_nsproxy->cgroup_ns->root_cset here without extra
* locking as we do it from container init at container start after
* ve_grab_context and only container init can tear those down.
*/
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
BUG_ON(!cset);
spin_lock_irq(&css_set_lock);
@@ -2279,7 +2279,7 @@ int cgroup_join_vz_slice(struct ve_struct *ve)
struct cgroup *cgrp;
int ret;
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
cgrp = cset_cgroup_from_root(cset, &cgrp_dfl_root);
if (!is_virtualized_cgroup(cgrp) ||
@@ -2314,7 +2314,7 @@ int cgroup_leave_vz_slice(struct ve_struct *ve)
struct css_set *cset;
struct cgroup *cgrp;
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
cgrp = cset_cgroup_from_root(cset, &cgrp_dfl_root);
if (!is_virtualized_cgroup(cgrp) ||
@@ -2341,7 +2341,7 @@ static void ve_release_agent_setup_work(struct callback_head *head)
ve = container_of(head, struct ve_struct,
ve_release_agent_setup_head);
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
/*
* We want to traverse the cset->cgrp_links list and
@@ -2399,7 +2399,7 @@ int ve_release_agent_setup(struct ve_struct *ve)
* cgroup_mutex, we can't take cgroup_mutex under the refcount
* or under locks, which are under the refcount (ve->op_sem).
*
- * It's safe to use ve->ve_ns->cgroup_ns->root_cset inside task_work
+ * It's safe to use ve->ve_nsproxy->cgroup_ns->root_cset inside task_work
* without extra locking as we do it from container init before exiting
* to userspace just after container start and only container init can
* tear those down.
@@ -2421,11 +2421,11 @@ void ve_release_agent_teardown(struct ve_struct *ve)
BUG_ON(!cft || cft->file_offset);
/*
- * It's safe to use ve->ve_ns->cgroup_ns->root_cset here without extra
+ * It's safe to use ve->ve_nsproxy->cgroup_ns->root_cset here without extra
* locking as we do it from container init at container stop before
* ve_drop_context and only container init can tear those down.
*/
- cset = rcu_dereference_protected(ve->ve_ns, 1)->cgroup_ns->root_cset;
+ cset = rcu_dereference_protected(ve->ve_nsproxy, 1)->cgroup_ns->root_cset;
mutex_lock(&cgroup_mutex);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index d5dc15942ab50..75148d25b7b39 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -64,7 +64,7 @@ struct ve_struct ve0 = {
.ve_name = "0",
.start_jiffies = INITIAL_JIFFIES,
- RCU_POINTER_INITIALIZER(ve_ns, &init_nsproxy),
+ RCU_POINTER_INITIALIZER(ve_nsproxy, &init_nsproxy),
.state = VE_STATE_RUNNING,
.is_pseudosuper = 1,
@@ -249,7 +249,7 @@ bool current_user_ns_initial(void)
return true;
rcu_read_lock();
- if (ve->ve_ns && ve->init_cred->user_ns == current_user_ns())
+ if (ve->ve_nsproxy && ve->init_cred->user_ns == current_user_ns())
ret = true;
rcu_read_unlock();
@@ -285,16 +285,16 @@ EXPORT_SYMBOL(ve_net_hide_sysctl);
bool is_ve_init_net(const struct net *net)
{
struct ve_struct *ve = net->owner_ve;
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
bool ret = false;
if (ve_is_super(ve))
return net_eq(net, &init_net);
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (ve_ns)
- ret = net_eq(ve_ns->net_ns, net);
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (ve_nsproxy)
+ ret = net_eq(ve_nsproxy->net_ns, net);
rcu_read_unlock();
return ret;
@@ -434,7 +434,7 @@ struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int subsys_id)
rcu_read_lock();
- nsproxy = rcu_dereference(ve->ve_ns);
+ nsproxy = rcu_dereference(ve->ve_nsproxy);
if (!nsproxy)
nsproxy = &init_nsproxy;
@@ -453,19 +453,19 @@ static void ve_grab_context(struct ve_struct *ve)
ve->init_cred = (struct cred *)get_current_cred();
get_nsproxy(tsk->nsproxy);
- rcu_assign_pointer(ve->ve_ns, tsk->nsproxy);
+ rcu_assign_pointer(ve->ve_nsproxy, tsk->nsproxy);
}
static void ve_drop_context(struct ve_struct *ve)
{
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
- ve_ns = rcu_dereference_protected(ve->ve_ns, lockdep_is_held(&ve->op_sem));
+ ve_nsproxy = rcu_dereference_protected(ve->ve_nsproxy, lockdep_is_held(&ve->op_sem));
- /* Allows to dereference init_cred and init_task if ve_ns is set */
- rcu_assign_pointer(ve->ve_ns, NULL);
+ /* Allows to dereference init_cred and init_task if ve_nsproxy is set */
+ rcu_assign_pointer(ve->ve_nsproxy, NULL);
synchronize_rcu();
- put_nsproxy(ve_ns);
+ put_nsproxy(ve_nsproxy);
put_cred(ve->init_cred);
ve->init_cred = NULL;
@@ -697,22 +697,22 @@ void ve_rm_from_release_list(struct cgroup *cgrp)
static int ve_start_container(struct ve_struct *ve)
{
struct task_struct *tsk = current;
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
int err;
if (!ve->veid)
return -ENOENT;
- ve_ns = rcu_dereference_protected(ve->ve_ns, lockdep_is_held(&ve->op_sem));
+ ve_nsproxy = rcu_dereference_protected(ve->ve_nsproxy, lockdep_is_held(&ve->op_sem));
- if (ve->state != VE_STATE_STARTING || ve_ns)
+ if (ve->state != VE_STATE_STARTING || ve_nsproxy)
return -EBUSY;
if (tsk->task_ve != ve || !is_child_reaper(task_pid(tsk)))
return -ECHILD;
/*
- * It's comfortable to use ve_struct::ve_ns::pid_ns_for_children
+ * It's comfortable to use ve_struct::ve_nsproxy::pid_ns_for_children
* as a pointer to ve's root pid namespace. Here we sanity check
* the task namespaces are so.
*/
@@ -792,15 +792,15 @@ static int ve_start_container(struct ve_struct *ve)
void ve_stop_ns(struct pid_namespace *pid_ns)
{
struct ve_struct *ve = current->task_ve;
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
down_write(&ve->op_sem);
- ve_ns = rcu_dereference_protected(ve->ve_ns, lockdep_is_held(&ve->op_sem));
+ ve_nsproxy = rcu_dereference_protected(ve->ve_nsproxy, lockdep_is_held(&ve->op_sem));
/*
* current->cgroups already switched to init_css_set in cgroup_exit(),
* but current->task_ve still points to our exec ve.
*/
- if (!ve_ns || ve_ns->pid_ns_for_children != pid_ns)
+ if (!ve_nsproxy || ve_nsproxy->pid_ns_for_children != pid_ns)
goto unlock;
/*
* Here the VE changes its state into stopping.
@@ -844,14 +844,14 @@ void ve_stop_ns(struct pid_namespace *pid_ns)
void ve_exit_ns(struct pid_namespace *pid_ns)
{
struct ve_struct *ve = current->task_ve;
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
/*
* Check that root container pidns dies and we are here to stop VE
*/
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (!ve_ns || ve_ns->pid_ns_for_children != pid_ns) {
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (!ve_nsproxy || ve_nsproxy->pid_ns_for_children != pid_ns) {
rcu_read_unlock();
return;
}
@@ -1232,7 +1232,7 @@ static void ve_attach(struct cgroup_taskset *tset)
* to ve.
*
* So we have two cases:
- * - init is handled in ve_exit_ns, unless there is no ve_ns;
+ * - init is handled in ve_exit_ns, unless there is no ve_nsproxy;
* - "external" task (the task which does not resolve to a valid pid in
* container pid namespace, and thus there is no guaranty that it would be
* reaped in time) is handled below.
@@ -1249,8 +1249,8 @@ static void ve_exit(struct task_struct *task)
* Clear task_ve if ve has no namespaces (ve is starting, stopped or
* stopping), or in case of "external" task.
*/
- if (!ve->ve_ns ||
- !task_pid_nr_ns(task, ve->ve_ns->pid_ns_for_children))
+ if (!ve->ve_nsproxy ||
+ !task_pid_nr_ns(task, ve->ve_nsproxy->pid_ns_for_children))
rcu_assign_pointer(task->task_ve, &ve0);
rcu_read_unlock();
}
@@ -1310,17 +1310,17 @@ static u64 ve_id_read(struct cgroup_subsys_state *css, struct cftype *cft)
static int ve_id_write(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val)
{
struct ve_struct *ve = css_to_ve(css);
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
int err = 0;
if (val <= 0 || val > INT_MAX)
return -EINVAL;
down_write(&ve->op_sem);
- ve_ns = rcu_dereference_protected(ve->ve_ns, lockdep_is_held(&ve->op_sem));
+ ve_nsproxy = rcu_dereference_protected(ve->ve_nsproxy, lockdep_is_held(&ve->op_sem));
/* FIXME: check veid is uniqul */
- if (VE_IS_RUNNING(ve) || ve_ns) {
+ if (VE_IS_RUNNING(ve) || ve_nsproxy) {
if (ve->veid != val)
err = -EBUSY;
} else
@@ -1355,7 +1355,7 @@ static int ve_pseudosuper_write(struct cgroup_subsys_state *css, struct cftype *
return -EPERM;
down_write(&ve->op_sem);
- if (val && (VE_IS_RUNNING(ve) || ve->ve_ns)) {
+ if (val && (VE_IS_RUNNING(ve) || ve->ve_nsproxy)) {
up_write(&ve->op_sem);
return -EBUSY;
}
@@ -1391,7 +1391,7 @@ static int ve_features_write(struct cgroup_subsys_state *css, struct cftype *cft
return -EPERM;
down_write(&ve->op_sem);
- if (VE_IS_RUNNING(ve) || ve->ve_ns) {
+ if (VE_IS_RUNNING(ve) || ve->ve_nsproxy) {
up_write(&ve->op_sem);
return -EBUSY;
}
@@ -1414,7 +1414,7 @@ static int ve_netns_max_nr_write(struct cgroup_subsys_state *css, struct cftype
return -EPERM;
down_write(&ve->op_sem);
- if (VE_IS_RUNNING(ve) || ve->ve_ns) {
+ if (VE_IS_RUNNING(ve) || ve->ve_nsproxy) {
up_write(&ve->op_sem);
return -EBUSY;
}
@@ -1466,13 +1466,13 @@ static int ve_os_release_read(struct seq_file *sf, void *v)
down_read(&ve->op_sem);
- if (!ve->ve_ns) {
+ if (!ve->ve_nsproxy) {
ret = -ENOENT;
goto up_opsem;
}
down_read(&uts_sem);
- seq_puts(sf, ve->ve_ns->uts_ns->name.release);
+ seq_puts(sf, ve->ve_nsproxy->uts_ns->name.release);
seq_putc(sf, '\n');
up_read(&uts_sem);
up_opsem:
@@ -1492,7 +1492,7 @@ static ssize_t ve_os_release_write(struct kernfs_open_file *of, char *buf,
down_read(&ve->op_sem);
- if (!ve->ve_ns) {
+ if (!ve->ve_nsproxy) {
ret = -ENOENT;
goto up_opsem;
}
@@ -1504,7 +1504,7 @@ static ssize_t ve_os_release_write(struct kernfs_open_file *of, char *buf,
}
down_write(&uts_sem);
- release = ve->ve_ns->uts_ns->name.release;
+ release = ve->ve_nsproxy->uts_ns->name.release;
strncpy(release, buf, __NEW_UTS_LEN);
release[__NEW_UTS_LEN] = '\0';
up_write(&uts_sem);
@@ -1523,13 +1523,13 @@ static u64 ve_pid_max_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct ve_struct *ve = css_to_ve(css);
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
u64 pid_max = 0;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (ve_ns && ve_ns->pid_ns_for_children)
- pid_max = ve_ns->pid_ns_for_children->pid_max;
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (ve_nsproxy && ve_nsproxy->pid_ns_for_children)
+ pid_max = ve_nsproxy->pid_ns_for_children->pid_max;
rcu_read_unlock();
@@ -1542,22 +1542,22 @@ static int ve_pid_max_write_running_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct ve_struct *ve = css_to_ve(css);
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
if (!ve_is_super(get_exec_env()) &&
!ve->is_pseudosuper)
return -EPERM;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (!ve_ns || !ve_ns->pid_ns_for_children) {
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (!ve_nsproxy || !ve_nsproxy->pid_ns_for_children) {
return -EBUSY;
}
if (pid_max_min > val || pid_max_max < val) {
return -EINVAL;
}
- ve->ve_ns->pid_ns_for_children->pid_max = val;
+ ve->ve_nsproxy->pid_ns_for_children->pid_max = val;
rcu_read_unlock();
return 0;
@@ -1566,19 +1566,19 @@ static int ve_pid_max_write_running_u64(struct cgroup_subsys_state *css,
static int ve_ts_read(struct seq_file *sf, void *v)
{
struct ve_struct *ve = css_to_ve(seq_css(sf));
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
struct time_namespace *time_ns;
struct timespec64 tp = ns_to_timespec64(0);
struct timespec64 *offset = NULL;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (!ve_ns) {
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (!ve_nsproxy) {
rcu_read_unlock();
goto out;
}
- time_ns = get_time_ns(ve_ns->time_ns);
+ time_ns = get_time_ns(ve_nsproxy->time_ns);
rcu_read_unlock();
switch (seq_cft(sf)->private) {
@@ -1765,7 +1765,7 @@ static int ve_aio_max_nr_write(struct cgroup_subsys_state *css,
return -EPERM;
down_write(&ve->op_sem);
- if (VE_IS_RUNNING(ve) || ve->ve_ns) {
+ if (VE_IS_RUNNING(ve) || ve->ve_nsproxy) {
up_write(&ve->op_sem);
return -EBUSY;
}
diff --git a/kernel/ve/vzevent.c b/kernel/ve/vzevent.c
index 4340babc3b672..bb8f22ea05049 100644
--- a/kernel/ve/vzevent.c
+++ b/kernel/ve/vzevent.c
@@ -112,7 +112,7 @@ static void ve_stop(void *data)
struct pid_namespace *pid_ns;
rcu_read_lock();
- pid_ns = rcu_dereference(ve->ve_ns)->pid_ns_for_children;
+ pid_ns = rcu_dereference(ve->ve_nsproxy)->pid_ns_for_children;
if (pid_ns->reboot == SIGHUP && reboot_event)
event = VE_EVENT_REBOOT;
rcu_read_unlock();
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 68f9efbaff29a..30c2599e3cc1c 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -175,7 +175,7 @@ static int cgrp_css_online(struct cgroup_subsys_state *css)
* Inherit prios from the parent cgroup in scope of ve init netns.
*/
if (!ve_is_super(ve)) {
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
struct net *net = NULL;
/*
@@ -185,9 +185,9 @@ static int cgrp_css_online(struct cgroup_subsys_state *css)
* cleanup_net().
*/
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (ve_ns)
- net = ve_ns->net_ns;
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (ve_nsproxy)
+ net = ve_nsproxy->net_ns;
rcu_read_unlock();
if (net && net != &init_net) {
@@ -222,12 +222,12 @@ static int read_priomap(struct seq_file *sf, void *v)
ve = get_curr_ve();
if (!ve_is_super(ve)) {
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (ve_ns)
- _net = get_net(ve_ns->net_ns);
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (ve_nsproxy)
+ _net = get_net(ve_nsproxy->net_ns);
rcu_read_unlock();
}
put_ve(ve);
@@ -258,12 +258,12 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
ve = get_curr_ve();
if (!ve_is_super(ve)) {
- struct nsproxy *ve_ns;
+ struct nsproxy *ve_nsproxy;
rcu_read_lock();
- ve_ns = rcu_dereference(ve->ve_ns);
- if (ve_ns)
- _net = get_net(ve_ns->net_ns);
+ ve_nsproxy = rcu_dereference(ve->ve_nsproxy);
+ if (ve_nsproxy)
+ _net = get_net(ve_nsproxy->net_ns);
rcu_read_unlock();
}
put_ve(ve);