[Devel] [PATCH v3 1/2] tcache: Refactor tcache_shrink_scan()

2018-01-23 Thread Kirill Tkhai
Make the function have a single return point.

Signed-off-by: Kirill Tkhai 
---
 mm/tcache.c |   16 
 1 file changed, 8 insertions(+), 8 deletions(-)
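
For reference, this is how the whole function reads after the patch (a
reconstruction from the hunk below, not part of the patch itself):

static unsigned long tcache_shrink_scan(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        long nr_isolated, nr_reclaimed;
        struct page **pages;

        pages = get_cpu_var(tcache_page_vec);

        if (WARN_ON(sc->nr_to_scan > TCACHE_SCAN_BATCH))
                sc->nr_to_scan = TCACHE_SCAN_BATCH;

        nr_isolated = tcache_lru_isolate(sc->nid, pages, sc->nr_to_scan);
        if (!nr_isolated) {
                nr_reclaimed = SHRINK_STOP;
                goto out;
        }
        nr_reclaimed = tcache_reclaim_pages(pages, nr_isolated);
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += nr_reclaimed;
out:
        /* single exit: the per-cpu vector is always released here */
        put_cpu_var(tcache_page_vec);
        return nr_reclaimed;
}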

diff --git a/mm/tcache.c b/mm/tcache.c
index a45af63fbd1b..b7756adda6d8 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -1200,24 +1200,24 @@ static DEFINE_PER_CPU(struct page * [TCACHE_SCAN_BATCH], tcache_page_vec);
 static unsigned long tcache_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
 {
-   struct page **pages = get_cpu_var(tcache_page_vec);
-   int nr_isolated, nr_reclaimed;
+   long nr_isolated, nr_reclaimed;
+   struct page **pages;
+
+   pages = get_cpu_var(tcache_page_vec);
 
if (WARN_ON(sc->nr_to_scan > TCACHE_SCAN_BATCH))
sc->nr_to_scan = TCACHE_SCAN_BATCH;
 
nr_isolated = tcache_lru_isolate(sc->nid, pages, sc->nr_to_scan);
if (!nr_isolated) {
-   put_cpu_var(tcache_page_vec);
-   return SHRINK_STOP;
+   nr_reclaimed = SHRINK_STOP;
+   goto out;
}
-
nr_reclaimed = tcache_reclaim_pages(pages, nr_isolated);
-   put_cpu_var(tcache_page_vec);
-
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_reclaimed;
-
+out:
+   put_cpu_var(tcache_page_vec);
return nr_reclaimed;
 }
 



[Devel] [PATCH v3 2/2] tcache: Fix race between tcache_invalidate_node() and tcache_attach_page()

2018-01-23 Thread Kirill Tkhai
tcache_attach_page()            tcache_invalidate_node()
..                              __tcache_lookup_node()
..                              __tcache_delete_node()
Check node->invalidated         ..
tcache_page_tree_insert()       ..
tcache_lru_add()                ..
..                              tcache_invalidate_node_pages()
..                                node->invalidated = true

Check node->nr_pages to determine whether the race has happened, and repeat
the iteration over the node's pages if so.

v2: Move the node->invalidated assignment down into tcache_invalidate_node_tree().
v3: Also synchronize sched in case of a race with tcache_shrink_count(),
to minimize the number of repeats.

Signed-off-by: Kirill Tkhai 
---
 mm/tcache.c |   32 +++-
 1 file changed, 23 insertions(+), 9 deletions(-)
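
The synchronization scheme of the fix, condensed into one place (a sketch
reconstructed from the hunks below, not an additional change): the insert side
runs with irqs disabled, which acts as an implicit rcu_read_lock_sched()
section, so the invalidate side may call synchronize_sched() to wait out any
inserter that sampled node->invalidated before it was set, and then re-check
node->nr_pages once:

        /* tcache_attach_page(), insert side */
        spin_lock_irqsave(&node->tree_lock, flags);       /* irqs off == rcu_read_lock_sched() */
        err = tcache_page_tree_insert(node, index, page); /* insertion checks node->invalidated */
        spin_unlock(&node->tree_lock);
        if (!err)
                tcache_lru_add(node->pool, page);
        local_irq_restore(flags);                         /* ends the implicit sched-RCU section */

        /* tcache_invalidate_node_pages(), invalidate side, after the first scan */
        if (synchronize_sched_once) {
                synchronize_sched_once = false;
                if (!repeat) {
                        spin_lock_irq(&node->tree_lock);
                        repeat = (node->nr_pages != 0);   /* did a racing inserter slip in? */
                        spin_unlock_irq(&node->tree_lock);
                }
                if (repeat) {
                        synchronize_sched();              /* wait for in-flight inserters */
                        goto again;                       /* one more pass will see their pages */
                }
        }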

diff --git a/mm/tcache.c b/mm/tcache.c
index b7756adda6d8..897ddfe02ab5 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -676,6 +676,7 @@ static void tcache_invalidate_node(struct tcache_pool *pool,
node = __tcache_lookup_node(&tree->root, key, &rb_link, &rb_parent);
if (node) {
tcache_hold_node(node);
+   node->invalidated = true;
__tcache_delete_node(&tree->root, node);
}
spin_unlock_irq(&tree->lock);
@@ -703,6 +704,7 @@ tcache_invalidate_node_tree(struct tcache_node_tree *tree)
WARN_ON(atomic_read(&node->kref.refcount) != 1);
WARN_ON(node->nr_pages == 0);
WARN_ON(node->invalidated);
+   node->invalidated = true;
 
tcache_hold_node(node);
tcache_invalidate_node_pages(node);
@@ -799,13 +801,16 @@ tcache_attach_page(struct tcache_node *node, pgoff_t index, struct page *page)
int err = 0;
 
tcache_init_page(page, node, index);
-
+   /*
+* Disabling of irqs implies rcu_read_lock_sched().
+* See tcache_invalidate_node_pages() for details.
+*/
spin_lock_irqsave(&node->tree_lock, flags);
err = tcache_page_tree_insert(node, index, page);
spin_unlock(&node->tree_lock);
if (!err)
tcache_lru_add(node->pool, page);
-   local_irq_restore(flags);
+   local_irq_restore(flags); /* Implies rcu_read_unlock_sched() */
return err;
 }
 
@@ -911,17 +916,16 @@ static unsigned tcache_lookup(struct page **pages, struct tcache_node *node,
 static noinline_for_stack void
 tcache_invalidate_node_pages(struct tcache_node *node)
 {
+   bool repeat, synchronize_sched_once = true;
pgoff_t indices[TCACHE_PAGEVEC_SIZE];
struct page *pages[TCACHE_PAGEVEC_SIZE];
pgoff_t index = 0;
unsigned nr_pages;
-   bool repeat;
int i;
 
/*
 * First forbid new page insertions - see tcache_page_tree_replace.
 */
-   node->invalidated = true;
 again:
repeat = false;
while ((nr_pages = tcache_lookup(pages, node, index,
@@ -940,6 +944,7 @@ tcache_invalidate_node_pages(struct tcache_node *node)
local_irq_enable();
tcache_put_page(page);
} else {
+   /* Race with page_ref_freeze() */
local_irq_enable();
repeat = true;
}
@@ -948,9 +953,18 @@ tcache_invalidate_node_pages(struct tcache_node *node)
index++;
}
 
-   if (repeat) {
-   index = 0;
-   goto again;
+   if (synchronize_sched_once) {
+   synchronize_sched_once = false;
+   if (!repeat) {
+   /* Race with tcache_attach_page() */
+   spin_lock_irq(&node->tree_lock);
+   repeat = (node->nr_pages != 0);
+   spin_unlock_irq(&node->tree_lock);
+   }
+   if (repeat) {
+   synchronize_sched();
+   goto again;
+   }
}
 
WARN_ON(node->nr_pages != 0);
@@ -1203,7 +1217,7 @@ static unsigned long tcache_shrink_scan(struct shrinker *shrink,
long nr_isolated, nr_reclaimed;
struct page **pages;
 
-   pages = get_cpu_var(tcache_page_vec);
+   pages = get_cpu_var(tcache_page_vec); /* Implies rcu_read_lock_sched() */
 
if (WARN_ON(sc->nr_to_scan > TCACHE_SCAN_BATCH))
sc->nr_to_scan = TCACHE_SCAN_BATCH;
@@ -1217,7 +1231,7 @@ static unsigned long tcache_shrink_scan(struct shrinker *shrink,
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_reclaimed;
 out:
-   put_cpu_var(tcache_page_vec);
+   put_cpu_var(tcache_page_vec); /* Implies rcu_read_unlock_sched() */
return nr_reclaimed;
 }
 



[Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
Stanislav Kinsburskiy says:

"SPFS manager has a special "--exit-with-spfs" options, which is used by CRIU.
 The idea of the option is simple: force SPFS manager to exit, when it has some
 SPFS processes among its children (i.e. spfs was mounted at least once),
 but all these processes have exited for whatever reason (which usually happens
 when restore has failed and spfs mounts where unmounted).
 Although it works in overall (main SPFS manager process exits), its children
 (responsible to SPFS replacement) may wait on FUTEX for "release" command
 for corresponding SPFS mount and thus never stop until they are killed".

1 spfs-manager
2   \_ spfs
3   \_ spfs-manager
4   \_ spfs
5   \_ spfs-manager

2 and 3 are the pair for one mount, and 4 and 5 are the pair for another mount.
The patch makes spfs-manager 1 kill 3 in case 2 has exited.

https://jira.sw.ru/browse/PSBM-80055

Signed-off-by: Kirill Tkhai 
---
 manager/context.c |4 
 manager/spfs.c|1 +
 2 files changed, 5 insertions(+)

diff --git a/manager/context.c b/manager/context.c
index 1eb37c9..4464a23 100644
--- a/manager/context.c
+++ b/manager/context.c
@@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
/* SPFS master was killed. We need to release the reference */
spfs_release_mnt(info);
 
+   if (killed || WEXITSTATUS(status))
+   kill(info->replacer, SIGKILL);
+
info->dead = true;
del_spfs_info(ctx->spfs_mounts, info);
 
@@ -88,6 +91,7 @@ static void sigchld_handler(int signal, siginfo_t *siginfo, void *data)
 * corresponding fd.
 */
spfs_release_mnt(info);
+   info->replacer = -1;
} else {
info = find_spfs_by_pid(ctx->spfs_mounts, pid);
if (info) {
diff --git a/manager/spfs.c b/manager/spfs.c
index 3e0f667..0b94587 100644
--- a/manager/spfs.c
+++ b/manager/spfs.c
@@ -39,6 +39,7 @@ int create_spfs_info(const char *id,
pr_err("failed to allocate\n");
return -ENOMEM;
}
+   info->replacer = -1;
 
err = init_mount_info(&info->mnt, id, mountpoint, ns_mountpoint);
if (err)



Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
Missed header. Actually it's v2

On 23.01.2018 12:41, Kirill Tkhai wrote:
> Stanislav Kinsburskiy says:
> 
> "SPFS manager has a special "--exit-with-spfs" options, which is used by CRIU.
>  The idea of the option is simple: force SPFS manager to exit, when it has 
> some
>  SPFS processes among its children (i.e. spfs was mounted at least once),
>  but all these processes have exited for whatever reason (which usually 
> happens
>  when restore has failed and spfs mounts where unmounted).
>  Although it works in overall (main SPFS manager process exits), its children
>  (responsible to SPFS replacement) may wait on FUTEX for "release" command
>  for corresponding SPFS mount and thus never stop until they are killed".
> 
> 1 spfs-manager
> 2   \_ spfs
> 3   \_ spfs-manager
> 4   \_ spfs
> 5   \_ spfs-manager
> 
> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
> 
> https://jira.sw.ru/browse/PSBM-80055
> 
> Signed-off-by: Kirill Tkhai 
> ---
>  manager/context.c |4 
>  manager/spfs.c|1 +
>  2 files changed, 5 insertions(+)
> 
> diff --git a/manager/context.c b/manager/context.c
> index 1eb37c9..4464a23 100644
> --- a/manager/context.c
> +++ b/manager/context.c
> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
> spfs_manager_context_s *ctx,
>   /* SPFS master was killed. We need to release the reference */
>   spfs_release_mnt(info);
>  
> + if (killed || WEXITSTATUS(status))
> + kill(info->replacer, SIGKILL);
> +
>   info->dead = true;
>   del_spfs_info(ctx->spfs_mounts, info);
>  
> @@ -88,6 +91,7 @@ static void sigchld_handler(int signal, siginfo_t *siginfo, 
> void *data)
>* corresponding fd.
>*/
>   spfs_release_mnt(info);
> + info->replacer = -1;
>   } else {
>   info = find_spfs_by_pid(ctx->spfs_mounts, pid);
>   if (info) {
> diff --git a/manager/spfs.c b/manager/spfs.c
> index 3e0f667..0b94587 100644
> --- a/manager/spfs.c
> +++ b/manager/spfs.c
> @@ -39,6 +39,7 @@ int create_spfs_info(const char *id,
>   pr_err("failed to allocate\n");
>   return -ENOMEM;
>   }
> + info->replacer = -1;
>  
>   err = init_mount_info(&info->mnt, id, mountpoint, ns_mountpoint);
>   if (err)
> 


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Stanislav Kinsburskiy
Please, see a couple of nits below

23.01.2018 10:41, Kirill Tkhai пишет:
> Stanislav Kinsburskiy says:
> 
> "SPFS manager has a special "--exit-with-spfs" options, which is used by CRIU.
>  The idea of the option is simple: force SPFS manager to exit, when it has 
> some
>  SPFS processes among its children (i.e. spfs was mounted at least once),
>  but all these processes have exited for whatever reason (which usually 
> happens
>  when restore has failed and spfs mounts where unmounted).
>  Although it works in overall (main SPFS manager process exits), its children
>  (responsible to SPFS replacement) may wait on FUTEX for "release" command
>  for corresponding SPFS mount and thus never stop until they are killed".
> 
> 1 spfs-manager
> 2   \_ spfs
> 3   \_ spfs-manager
> 4   \_ spfs
> 5   \_ spfs-manager
> 
> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
> 
> https://jira.sw.ru/browse/PSBM-80055
> 
> Signed-off-by: Kirill Tkhai 
> ---
>  manager/context.c |4 
>  manager/spfs.c|1 +
>  2 files changed, 5 insertions(+)
> 
> diff --git a/manager/context.c b/manager/context.c
> index 1eb37c9..4464a23 100644
> --- a/manager/context.c
> +++ b/manager/context.c
> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
> spfs_manager_context_s *ctx,
>   /* SPFS master was killed. We need to release the reference */
>   spfs_release_mnt(info);
>  
> + if (killed || WEXITSTATUS(status))
> + kill(info->replacer, SIGKILL);
> +

There is "if (killed)" check above.
Could you please move this hunk there?

>   info->dead = true;
>   del_spfs_info(ctx->spfs_mounts, info);
>  
> @@ -88,6 +91,7 @@ static void sigchld_handler(int signal, siginfo_t *siginfo, 
> void *data)
>* corresponding fd.
>*/
>   spfs_release_mnt(info);
> + info->replacer = -1;
>   } else {
>   info = find_spfs_by_pid(ctx->spfs_mounts, pid);
>   if (info) {
> diff --git a/manager/spfs.c b/manager/spfs.c
> index 3e0f667..0b94587 100644
> --- a/manager/spfs.c
> +++ b/manager/spfs.c
> @@ -39,6 +39,7 @@ int create_spfs_info(const char *id,
>   pr_err("failed to allocate\n");
>   return -ENOMEM;
>   }
> + info->replacer = -1;
>  

Could you, please, move it down to the end of the function where unconditional 
actions take place?

>   err = init_mount_info(&info->mnt, id, mountpoint, ns_mountpoint);
>   if (err)
> 


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
> Please, see a couple of nits below
> 
> 23.01.2018 10:41, Kirill Tkhai пишет:
>> Stanislav Kinsburskiy says:
>>
>> "SPFS manager has a special "--exit-with-spfs" options, which is used by 
>> CRIU.
>>  The idea of the option is simple: force SPFS manager to exit, when it has 
>> some
>>  SPFS processes among its children (i.e. spfs was mounted at least once),
>>  but all these processes have exited for whatever reason (which usually 
>> happens
>>  when restore has failed and spfs mounts where unmounted).
>>  Although it works in overall (main SPFS manager process exits), its children
>>  (responsible to SPFS replacement) may wait on FUTEX for "release" command
>>  for corresponding SPFS mount and thus never stop until they are killed".
>>
>> 1 spfs-manager
>> 2   \_ spfs
>> 3   \_ spfs-manager
>> 4   \_ spfs
>> 5   \_ spfs-manager
>>
>> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
>> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>>
>> https://jira.sw.ru/browse/PSBM-80055
>>
>> Signed-off-by: Kirill Tkhai 
>> ---
>>  manager/context.c |4 
>>  manager/spfs.c|1 +
>>  2 files changed, 5 insertions(+)
>>
>> diff --git a/manager/context.c b/manager/context.c
>> index 1eb37c9..4464a23 100644
>> --- a/manager/context.c
>> +++ b/manager/context.c
>> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
>> spfs_manager_context_s *ctx,
>>  /* SPFS master was killed. We need to release the reference */
>>  spfs_release_mnt(info);
>>  
>> +if (killed || WEXITSTATUS(status))
>> +kill(info->replacer, SIGKILL);
>> +
> 
> There is "if (killed)" check above.
> Could you please move this hunk there?

There is a logical OR (||) in the hunk. How should I move it under an
unconditional check?

>>  info->dead = true;
>>  del_spfs_info(ctx->spfs_mounts, info);
>>  
>> @@ -88,6 +91,7 @@ static void sigchld_handler(int signal, siginfo_t 
>> *siginfo, void *data)
>>   * corresponding fd.
>>   */
>>  spfs_release_mnt(info);
>> +info->replacer = -1;
>>  } else {
>>  info = find_spfs_by_pid(ctx->spfs_mounts, pid);
>>  if (info) {
>> diff --git a/manager/spfs.c b/manager/spfs.c
>> index 3e0f667..0b94587 100644
>> --- a/manager/spfs.c
>> +++ b/manager/spfs.c
>> @@ -39,6 +39,7 @@ int create_spfs_info(const char *id,
>>  pr_err("failed to allocate\n");
>>  return -ENOMEM;
>>  }
>> +info->replacer = -1;
>>  
> 
> Could you, please, move it down to the end of the function where 
> unconditional actions take place?

No problem.

>>  err = init_mount_info(&info->mnt, id, mountpoint, ns_mountpoint);
>>  if (err)
>>


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
> 
> 
> 23.01.2018 10:51, Kirill Tkhai пишет:
>> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
>>> Please, see a couple of nits below
>>>
>>> 23.01.2018 10:41, Kirill Tkhai пишет:
 Stanislav Kinsburskiy says:

 "SPFS manager has a special "--exit-with-spfs" options, which is used by 
 CRIU.
  The idea of the option is simple: force SPFS manager to exit, when it has 
 some
  SPFS processes among its children (i.e. spfs was mounted at least once),
  but all these processes have exited for whatever reason (which usually 
 happens
  when restore has failed and spfs mounts where unmounted).
  Although it works in overall (main SPFS manager process exits), its 
 children
  (responsible to SPFS replacement) may wait on FUTEX for "release" command
  for corresponding SPFS mount and thus never stop until they are killed".

 1 spfs-manager
 2   \_ spfs
 3   \_ spfs-manager
 4   \_ spfs
 5   \_ spfs-manager

 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
 The patch makes spfs-manager 1 kill 3 in case of 2 exited.

 https://jira.sw.ru/browse/PSBM-80055

 Signed-off-by: Kirill Tkhai 
 ---
  manager/context.c |4 
  manager/spfs.c|1 +
  2 files changed, 5 insertions(+)

 diff --git a/manager/context.c b/manager/context.c
 index 1eb37c9..4464a23 100644
 --- a/manager/context.c
 +++ b/manager/context.c
 @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
 spfs_manager_context_s *ctx,
/* SPFS master was killed. We need to release the reference */
spfs_release_mnt(info);
  
 +  if (killed || WEXITSTATUS(status))
 +  kill(info->replacer, SIGKILL);
 +
>>>
>>> There is "if (killed)" check above.
>>> Could you please move this hunk there?
>>
>> There is logical OR (||) in the hunk. How should I move it to unconditional 
>> check?
>>
> 
> Ah, ok. Then let the check be as it is.
> Could you please add warning message for this kill with the process pid being 
> killed and the reason why (spfs was killed of exited with error)?

Ok


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Stanislav Kinsburskiy


23.01.2018 10:51, Kirill Tkhai пишет:
> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
>> Please, see a couple of nits below
>>
>> 23.01.2018 10:41, Kirill Tkhai пишет:
>>> Stanislav Kinsburskiy says:
>>>
>>> "SPFS manager has a special "--exit-with-spfs" options, which is used by 
>>> CRIU.
>>>  The idea of the option is simple: force SPFS manager to exit, when it has 
>>> some
>>>  SPFS processes among its children (i.e. spfs was mounted at least once),
>>>  but all these processes have exited for whatever reason (which usually 
>>> happens
>>>  when restore has failed and spfs mounts where unmounted).
>>>  Although it works in overall (main SPFS manager process exits), its 
>>> children
>>>  (responsible to SPFS replacement) may wait on FUTEX for "release" command
>>>  for corresponding SPFS mount and thus never stop until they are killed".
>>>
>>> 1 spfs-manager
>>> 2   \_ spfs
>>> 3   \_ spfs-manager
>>> 4   \_ spfs
>>> 5   \_ spfs-manager
>>>
>>> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
>>> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>>>
>>> https://jira.sw.ru/browse/PSBM-80055
>>>
>>> Signed-off-by: Kirill Tkhai 
>>> ---
>>>  manager/context.c |4 
>>>  manager/spfs.c|1 +
>>>  2 files changed, 5 insertions(+)
>>>
>>> diff --git a/manager/context.c b/manager/context.c
>>> index 1eb37c9..4464a23 100644
>>> --- a/manager/context.c
>>> +++ b/manager/context.c
>>> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
>>> spfs_manager_context_s *ctx,
>>> /* SPFS master was killed. We need to release the reference */
>>> spfs_release_mnt(info);
>>>  
>>> +   if (killed || WEXITSTATUS(status))
>>> +   kill(info->replacer, SIGKILL);
>>> +
>>
>> There is "if (killed)" check above.
>> Could you please move this hunk there?
> 
> There is logical OR (||) in the hunk. How should I move it to unconditional 
> check?
> 

Ah, ok. Then let the check be as it is.
Could you please add a warning message for this kill, with the pid of the process
being killed and the reason why (spfs was either killed or exited with an error)?


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
> 
> 
> 23.01.2018 10:51, Kirill Tkhai пишет:
>> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
>>> Please, see a couple of nits below
>>>
>>> 23.01.2018 10:41, Kirill Tkhai пишет:
 Stanislav Kinsburskiy says:

 "SPFS manager has a special "--exit-with-spfs" options, which is used by 
 CRIU.
  The idea of the option is simple: force SPFS manager to exit, when it has 
 some
  SPFS processes among its children (i.e. spfs was mounted at least once),
  but all these processes have exited for whatever reason (which usually 
 happens
  when restore has failed and spfs mounts where unmounted).
  Although it works in overall (main SPFS manager process exits), its 
 children
  (responsible to SPFS replacement) may wait on FUTEX for "release" command
  for corresponding SPFS mount and thus never stop until they are killed".

 1 spfs-manager
 2   \_ spfs
 3   \_ spfs-manager
 4   \_ spfs
 5   \_ spfs-manager

 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
 The patch makes spfs-manager 1 kill 3 in case of 2 exited.

 https://jira.sw.ru/browse/PSBM-80055

 Signed-off-by: Kirill Tkhai 
 ---
  manager/context.c |4 
  manager/spfs.c|1 +
  2 files changed, 5 insertions(+)

 diff --git a/manager/context.c b/manager/context.c
 index 1eb37c9..4464a23 100644
 --- a/manager/context.c
 +++ b/manager/context.c
 @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
 spfs_manager_context_s *ctx,
/* SPFS master was killed. We need to release the reference */
spfs_release_mnt(info);
  
 +  if (killed || WEXITSTATUS(status))
 +  kill(info->replacer, SIGKILL);
 +
>>>
>>> There is "if (killed)" check above.
>>> Could you please move this hunk there?
>>
>> There is logical OR (||) in the hunk. How should I move it to unconditional 
>> check?
>>
> 
> Ah, ok. Then let the check be as it is.
> Could you please add warning message for this kill with the process pid being 
> killed and the reason why (spfs was killed of exited with error)?

Maybe we should call spfs_release_mnt() in case of exit status != 0 too?


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Stanislav Kinsburskiy


23.01.2018 11:09, Kirill Tkhai пишет:
> On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
>>
>>
>> 23.01.2018 10:51, Kirill Tkhai пишет:
>>> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
 Please, see a couple of nits below

 23.01.2018 10:41, Kirill Tkhai пишет:
> Stanislav Kinsburskiy says:
>
> "SPFS manager has a special "--exit-with-spfs" options, which is used by 
> CRIU.
>  The idea of the option is simple: force SPFS manager to exit, when it 
> has some
>  SPFS processes among its children (i.e. spfs was mounted at least once),
>  but all these processes have exited for whatever reason (which usually 
> happens
>  when restore has failed and spfs mounts where unmounted).
>  Although it works in overall (main SPFS manager process exits), its 
> children
>  (responsible to SPFS replacement) may wait on FUTEX for "release" command
>  for corresponding SPFS mount and thus never stop until they are killed".
>
> 1 spfs-manager
> 2   \_ spfs
> 3   \_ spfs-manager
> 4   \_ spfs
> 5   \_ spfs-manager
>
> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>
> https://jira.sw.ru/browse/PSBM-80055
>
> Signed-off-by: Kirill Tkhai 
> ---
>  manager/context.c |4 
>  manager/spfs.c|1 +
>  2 files changed, 5 insertions(+)
>
> diff --git a/manager/context.c b/manager/context.c
> index 1eb37c9..4464a23 100644
> --- a/manager/context.c
> +++ b/manager/context.c
> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
> spfs_manager_context_s *ctx,
>   /* SPFS master was killed. We need to release the reference */
>   spfs_release_mnt(info);
>  
> + if (killed || WEXITSTATUS(status))
> + kill(info->replacer, SIGKILL);
> +

 There is "if (killed)" check above.
 Could you please move this hunk there?
>>>
>>> There is logical OR (||) in the hunk. How should I move it to unconditional 
>>> check?
>>>
>>
>> Ah, ok. Then let the check be as it is.
>> Could you please add warning message for this kill with the process pid 
>> being killed and the reason why (spfs was killed of exited with error)?
> 
> Maybe we should call spfs_release_mnt() in case of exit status != 0 too?
> 

Well, this makes sense.
If spfs exited with a non-zero result (either it was killed or exited due to some
error), then there is no need for the replacer. And there is no need for the
reference.
So, probably the check you proposed should be used for both
spfs_release_mnt(info) and kill(info->replacer, SIGKILL).





Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
On 23.01.2018 13:14, Stanislav Kinsburskiy wrote:
> 
> 
> 23.01.2018 11:09, Kirill Tkhai пишет:
>> On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
>>>
>>>
>>> 23.01.2018 10:51, Kirill Tkhai пишет:
 On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
> Please, see a couple of nits below
>
> 23.01.2018 10:41, Kirill Tkhai пишет:
>> Stanislav Kinsburskiy says:
>>
>> "SPFS manager has a special "--exit-with-spfs" options, which is used by 
>> CRIU.
>>  The idea of the option is simple: force SPFS manager to exit, when it 
>> has some
>>  SPFS processes among its children (i.e. spfs was mounted at least once),
>>  but all these processes have exited for whatever reason (which usually 
>> happens
>>  when restore has failed and spfs mounts where unmounted).
>>  Although it works in overall (main SPFS manager process exits), its 
>> children
>>  (responsible to SPFS replacement) may wait on FUTEX for "release" 
>> command
>>  for corresponding SPFS mount and thus never stop until they are killed".
>>
>> 1 spfs-manager
>> 2   \_ spfs
>> 3   \_ spfs-manager
>> 4   \_ spfs
>> 5   \_ spfs-manager
>>
>> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
>> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>>
>> https://jira.sw.ru/browse/PSBM-80055
>>
>> Signed-off-by: Kirill Tkhai 
>> ---
>>  manager/context.c |4 
>>  manager/spfs.c|1 +
>>  2 files changed, 5 insertions(+)
>>
>> diff --git a/manager/context.c b/manager/context.c
>> index 1eb37c9..4464a23 100644
>> --- a/manager/context.c
>> +++ b/manager/context.c
>> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
>> spfs_manager_context_s *ctx,
>>  /* SPFS master was killed. We need to release the 
>> reference */
>>  spfs_release_mnt(info);
>>  
>> +if (killed || WEXITSTATUS(status))
>> +kill(info->replacer, SIGKILL);
>> +
>
> There is "if (killed)" check above.
> Could you please move this hunk there?

 There is logical OR (||) in the hunk. How should I move it to 
 unconditional check?

>>>
>>> Ah, ok. Then let the check be as it is.
>>> Could you please add warning message for this kill with the process pid 
>>> being killed and the reason why (spfs was killed of exited with error)?
>>
>> Maybe we should call spfs_release_mnt() in case of exit status != 0 too?
>>
> 
> Well, this makes sense.
> If spfs exited with non-zero result (either it was killed or exited due to 
> some error), then there is no need in replacer. And there is no need in the 
> reference.
> So, probably this check you proposed should be used for both 
> spfs_release_mnt(info) and kill(info->replacer, SIGKILL).

Are you OK with this change being sent within this single patch, or should we
use one more patch?


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Stanislav Kinsburskiy


23.01.2018 11:17, Kirill Tkhai пишет:
> On 23.01.2018 13:14, Stanislav Kinsburskiy wrote:
>>
>>
>> 23.01.2018 11:09, Kirill Tkhai пишет:
>>> On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:


 23.01.2018 10:51, Kirill Tkhai пишет:
> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
>> Please, see a couple of nits below
>>
>> 23.01.2018 10:41, Kirill Tkhai пишет:
>>> Stanislav Kinsburskiy says:
>>>
>>> "SPFS manager has a special "--exit-with-spfs" options, which is used 
>>> by CRIU.
>>>  The idea of the option is simple: force SPFS manager to exit, when it 
>>> has some
>>>  SPFS processes among its children (i.e. spfs was mounted at least 
>>> once),
>>>  but all these processes have exited for whatever reason (which usually 
>>> happens
>>>  when restore has failed and spfs mounts where unmounted).
>>>  Although it works in overall (main SPFS manager process exits), its 
>>> children
>>>  (responsible to SPFS replacement) may wait on FUTEX for "release" 
>>> command
>>>  for corresponding SPFS mount and thus never stop until they are 
>>> killed".
>>>
>>> 1 spfs-manager
>>> 2   \_ spfs
>>> 3   \_ spfs-manager
>>> 4   \_ spfs
>>> 5   \_ spfs-manager
>>>
>>> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
>>> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>>>
>>> https://jira.sw.ru/browse/PSBM-80055
>>>
>>> Signed-off-by: Kirill Tkhai 
>>> ---
>>>  manager/context.c |4 
>>>  manager/spfs.c|1 +
>>>  2 files changed, 5 insertions(+)
>>>
>>> diff --git a/manager/context.c b/manager/context.c
>>> index 1eb37c9..4464a23 100644
>>> --- a/manager/context.c
>>> +++ b/manager/context.c
>>> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
>>> spfs_manager_context_s *ctx,
>>> /* SPFS master was killed. We need to release the 
>>> reference */
>>> spfs_release_mnt(info);
>>>  
>>> +   if (killed || WEXITSTATUS(status))
>>> +   kill(info->replacer, SIGKILL);
>>> +
>>
>> There is "if (killed)" check above.
>> Could you please move this hunk there?
>
> There is logical OR (||) in the hunk. How should I move it to 
> unconditional check?
>

 Ah, ok. Then let the check be as it is.
 Could you please add warning message for this kill with the process pid 
 being killed and the reason why (spfs was killed of exited with error)?
>>>
>>> Maybe we should call spfs_release_mnt() in case of exit status != 0 too?
>>>
>>
>> Well, this makes sense.
>> If spfs exited with non-zero result (either it was killed or exited due to 
>> some error), then there is no need in replacer. And there is no need in the 
>> reference.
>> So, probably this check you proposed should be used for both 
>> spfs_release_mnt(info) and kill(info->replacer, SIGKILL).
> 
> Are you OK with this change sent in the only patch, or should we use one more 
> patch?
> 

I think one patch is OK. Only mention the change explicitly in the patch 
description, please.


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
On 23.01.2018 13:18, Stanislav Kinsburskiy wrote:
> 
> 
> 23.01.2018 11:17, Kirill Tkhai пишет:
>> On 23.01.2018 13:14, Stanislav Kinsburskiy wrote:
>>>
>>>
>>> 23.01.2018 11:09, Kirill Tkhai пишет:
 On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
>
>
> 23.01.2018 10:51, Kirill Tkhai пишет:
>> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
>>> Please, see a couple of nits below
>>>
>>> 23.01.2018 10:41, Kirill Tkhai пишет:
 Stanislav Kinsburskiy says:

 "SPFS manager has a special "--exit-with-spfs" options, which is used 
 by CRIU.
  The idea of the option is simple: force SPFS manager to exit, when it 
 has some
  SPFS processes among its children (i.e. spfs was mounted at least 
 once),
  but all these processes have exited for whatever reason (which 
 usually happens
  when restore has failed and spfs mounts where unmounted).
  Although it works in overall (main SPFS manager process exits), its 
 children
  (responsible to SPFS replacement) may wait on FUTEX for "release" 
 command
  for corresponding SPFS mount and thus never stop until they are 
 killed".

 1 spfs-manager
 2   \_ spfs
 3   \_ spfs-manager
 4   \_ spfs
 5   \_ spfs-manager

 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
 The patch makes spfs-manager 1 kill 3 in case of 2 exited.

 https://jira.sw.ru/browse/PSBM-80055

 Signed-off-by: Kirill Tkhai 
 ---
  manager/context.c |4 
  manager/spfs.c|1 +
  2 files changed, 5 insertions(+)

 diff --git a/manager/context.c b/manager/context.c
 index 1eb37c9..4464a23 100644
 --- a/manager/context.c
 +++ b/manager/context.c
 @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
 spfs_manager_context_s *ctx,
/* SPFS master was killed. We need to release the 
 reference */
spfs_release_mnt(info);
  
 +  if (killed || WEXITSTATUS(status))
 +  kill(info->replacer, SIGKILL);
 +
>>>
>>> There is "if (killed)" check above.
>>> Could you please move this hunk there?
>>
>> There is logical OR (||) in the hunk. How should I move it to 
>> unconditional check?
>>
>
> Ah, ok. Then let the check be as it is.
> Could you please add warning message for this kill with the process pid 
> being killed and the reason why (spfs was killed of exited with error)?

 Maybe we should call spfs_release_mnt() in case of exit status != 0 too?

>>>
>>> Well, this makes sense.
>>> If spfs exited with non-zero result (either it was killed or exited due to 
>>> some error), then there is no need in replacer. And there is no need in the 
>>> reference.
>>> So, probably this check you proposed should be used for both 
>>> spfs_release_mnt(info) and kill(info->replacer, SIGKILL).
>>
>> Are you OK with this change sent in the only patch, or should we use one 
>> more patch?
>>
> 
> I think one patch is OK. Only mention the change explicitly in the patch 
> description, please.

Ok, then what about spfs_cleanup_env(info, killed)? It also has a killed argument.


Re: [Devel] [PATCH] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Stanislav Kinsburskiy


23.01.2018 11:19, Kirill Tkhai пишет:
> On 23.01.2018 13:18, Stanislav Kinsburskiy wrote:
>>
>>
>> 23.01.2018 11:17, Kirill Tkhai пишет:
>>> On 23.01.2018 13:14, Stanislav Kinsburskiy wrote:


 23.01.2018 11:09, Kirill Tkhai пишет:
> On 23.01.2018 13:07, Stanislav Kinsburskiy wrote:
>>
>>
>> 23.01.2018 10:51, Kirill Tkhai пишет:
>>> On 23.01.2018 12:48, Stanislav Kinsburskiy wrote:
 Please, see a couple of nits below

 23.01.2018 10:41, Kirill Tkhai пишет:
> Stanislav Kinsburskiy says:
>
> "SPFS manager has a special "--exit-with-spfs" options, which is used 
> by CRIU.
>  The idea of the option is simple: force SPFS manager to exit, when 
> it has some
>  SPFS processes among its children (i.e. spfs was mounted at least 
> once),
>  but all these processes have exited for whatever reason (which 
> usually happens
>  when restore has failed and spfs mounts where unmounted).
>  Although it works in overall (main SPFS manager process exits), its 
> children
>  (responsible to SPFS replacement) may wait on FUTEX for "release" 
> command
>  for corresponding SPFS mount and thus never stop until they are 
> killed".
>
> 1 spfs-manager
> 2   \_ spfs
> 3   \_ spfs-manager
> 4   \_ spfs
> 5   \_ spfs-manager
>
> 2 and 3 are pair of a mount, and 4 and 5 are pair of another mount.
> The patch makes spfs-manager 1 kill 3 in case of 2 exited.
>
> https://jira.sw.ru/browse/PSBM-80055
>
> Signed-off-by: Kirill Tkhai 
> ---
>  manager/context.c |4 
>  manager/spfs.c|1 +
>  2 files changed, 5 insertions(+)
>
> diff --git a/manager/context.c b/manager/context.c
> index 1eb37c9..4464a23 100644
> --- a/manager/context.c
> +++ b/manager/context.c
> @@ -53,6 +53,9 @@ static void cleanup_spfs_mount(struct 
> spfs_manager_context_s *ctx,
>   /* SPFS master was killed. We need to release the 
> reference */
>   spfs_release_mnt(info);
>  
> + if (killed || WEXITSTATUS(status))
> + kill(info->replacer, SIGKILL);
> +

 There is "if (killed)" check above.
 Could you please move this hunk there?
>>>
>>> There is logical OR (||) in the hunk. How should I move it to 
>>> unconditional check?
>>>
>>
>> Ah, ok. Then let the check be as it is.
>> Could you please add warning message for this kill with the process pid 
>> being killed and the reason why (spfs was killed of exited with error)?
>
> Maybe we should call spfs_release_mnt() in case of exit status != 0 too?
>

 Well, this makes sense.
 If spfs exited with non-zero result (either it was killed or exited due to 
 some error), then there is no need in replacer. And there is no need in 
 the reference.
 So, probably this check you proposed should be used for both 
 spfs_release_mnt(info) and kill(info->replacer, SIGKILL).
>>>
>>> Are you OK with this change sent in the only patch, or should we use one 
>>> more patch?
>>>
>>
>> I think one patch is OK. Only mention the change explicitly in the patch 
>> description, please.
> 
> Ok, then what about spfs_cleanup_env(info, killed)? It also has killed 
> argument
> 

Well, this is also true...
Maybe the right thing here would be to have 2 patches, and the first one should
be something like this (also renaming the variable to reflect the change):


diff --git a/manager/context.c b/manager/context.c
index 1eb37c9..756971b 100644
--- a/manager/context.c
+++ b/manager/context.c
@@ -45,12 +45,12 @@ const char *mgr_ovz_id(void)
 static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
   struct spfs_info_s *info, int status)
 {
-   bool killed = WIFSIGNALED(status);
+   bool failed = WIFSIGNALED(status) || WEXITSTATUS(status);
 
pr_debug("removing info %s from the list\n", info->mnt.id);
 
-   if (killed)
-   /* SPFS master was killed. We need to release the reference */
+   if (failed)
+   /* SPFS master has failed. We need to release the reference */
spfs_release_mnt(info);
 
info->dead = true;
@@ -59,7 +59,7 @@ static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
if (unlink(info->socket_path))
pr_perror("failed to unlink %s", info->socket_path);
 
-   spfs_cleanup_env(info, killed);
+   spfs_cleanup_env(info, failed);
 
close_namespaces(info->ns_fds);
 }
diff --git a/manager/spfs.c b/manager/spfs.c
index 3e0f667..99845b1 100644
--- a/manager/spfs.c
+++ b/manager/spfs.c
@@ -409,9 +409,9 @@ int spfs_p

[Devel] [PATCH v3 1/2] spfs: Handle non-zero exit status in cleanup_spfs_mount()

2018-01-23 Thread Kirill Tkhai
Consider a non-zero exit status of spfs similar to the killed
status and do the same cleanups.

v3: New

Signed-off-by: Kirill Tkhai 
---
 manager/context.c |8 
 manager/spfs.c|8 
 2 files changed, 8 insertions(+), 8 deletions(-)
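
Since the difference between "killed" and "failed" comes up repeatedly in the
discussion above, here is a minimal standalone userspace illustration of the
predicate introduced below (an example only, not spfs code):

#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid == 0)
                _exit(3);       /* pretend spfs exited with an error */

        int status;

        if (pid < 0 || waitpid(pid, &status, 0) < 0)
                return 1;

        /* failed: terminated by a signal OR exited with a non-zero status */
        bool failed = WIFSIGNALED(status) || !!WEXITSTATUS(status);

        printf("killed=%d exit=%d failed=%d\n",
               WIFSIGNALED(status) ? 1 : 0,
               WIFEXITED(status) ? WEXITSTATUS(status) : -1,
               failed);
        return 0;       /* prints: killed=0 exit=3 failed=1 */
}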

diff --git a/manager/context.c b/manager/context.c
index 1eb37c9..73d5ada 100644
--- a/manager/context.c
+++ b/manager/context.c
@@ -45,12 +45,12 @@ const char *mgr_ovz_id(void)
 static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
   struct spfs_info_s *info, int status)
 {
-   bool killed = WIFSIGNALED(status);
+   bool failed = WIFSIGNALED(status) || !!WEXITSTATUS(status);
 
pr_debug("removing info %s from the list\n", info->mnt.id);
 
-   if (killed)
-   /* SPFS master was killed. We need to release the reference */
+   if (failed)
+   /* SPFS master was failed. We need to release the reference */
spfs_release_mnt(info);
 
info->dead = true;
@@ -59,7 +59,7 @@ static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
if (unlink(info->socket_path))
pr_perror("failed to unlink %s", info->socket_path);
 
-   spfs_cleanup_env(info, killed);
+   spfs_cleanup_env(info, failed);
 
close_namespaces(info->ns_fds);
 }
diff --git a/manager/spfs.c b/manager/spfs.c
index 3e0f667..99845b1 100644
--- a/manager/spfs.c
+++ b/manager/spfs.c
@@ -409,9 +409,9 @@ int spfs_prepare_env(struct spfs_info_s *info, const char *proxy_dir)
return err ? err : res;
 }
 
-static int __spfs_cleanup_env(struct spfs_info_s *info, bool killed)
+static int __spfs_cleanup_env(struct spfs_info_s *info, bool failed)
 {
-   if (killed && umount(info->work_dir)) {
+   if (failed && umount(info->work_dir)) {
pr_perror("failed to umount %s", info->work_dir);
return -errno;
}
@@ -423,7 +423,7 @@ static int __spfs_cleanup_env(struct spfs_info_s *info, bool killed)
return 0;
 }
 
-int spfs_cleanup_env(struct spfs_info_s *info, bool killed)
+int spfs_cleanup_env(struct spfs_info_s *info, bool failed)
 {
int err, res;
unsigned orig_ns_mask;
@@ -432,7 +432,7 @@ int spfs_cleanup_env(struct spfs_info_s *info, bool killed)
if (res)
return res;
 
-   err = __spfs_cleanup_env(info, killed);
+   err = __spfs_cleanup_env(info, failed);
 
res = leave_spfs_context(info, orig_ns_mask);
 



[Devel] [PATCH v3 2/2] spfs: Main process wakes and kills its children on exit

2018-01-23 Thread Kirill Tkhai
Stanislav Kinsburskiy says:

"SPFS manager has a special "--exit-with-spfs" options, which is used by CRIU.
 The idea of the option is simple: force SPFS manager to exit, when it has some
 SPFS processes among its children (i.e. spfs was mounted at least once),
 but all these processes have exited for whatever reason (which usually happens
 when restore has failed and spfs mounts where unmounted).
 Although it works in overall (main SPFS manager process exits), its children
 (responsible to SPFS replacement) may wait on FUTEX for "release" command
 for corresponding SPFS mount and thus never stop until they are killed".

1 spfs-manager
2   \_ spfs
3   \_ spfs-manager
4   \_ spfs
5   \_ spfs-manager

2 and 3 are the pair for one mount, and 4 and 5 are the pair for another mount.
The patch makes spfs-manager 1 kill 3 in case 2 has exited.

https://jira.sw.ru/browse/PSBM-80055

v2: Kill only the single replacer, not all of them.
v3: Print a debug msg about the kill().

Signed-off-by: Kirill Tkhai 
---
 manager/context.c |9 +++--
 manager/spfs.c|1 +
 2 files changed, 8 insertions(+), 2 deletions(-)
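
For reference, after both patches of this series the beginning of
cleanup_spfs_mount() reads roughly as follows (reconstructed from the hunks of
patches 1/2 and 2/2; the rest of the function is unchanged and elided):

static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
                               struct spfs_info_s *info, int status)
{
        bool failed = WIFSIGNALED(status) || !!WEXITSTATUS(status);

        pr_debug("removing info %s from the list (replacer pid %d)\n",
                 info->mnt.id, info->replacer);

        if (failed) {
                /* spfs failed (killed or non-zero exit): drop the reference and kill the replacer */
                spfs_release_mnt(info);
                if (info->replacer > 0 && kill(info->replacer, SIGKILL))
                        pr_perror("Failed to kill replacer");
        }

        info->dead = true;
        del_spfs_info(ctx->spfs_mounts, info);
        /* ... unlink the socket, spfs_cleanup_env(info, failed), close_namespaces() ... */
}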

diff --git a/manager/context.c b/manager/context.c
index 73d5ada..54667bb 100644
--- a/manager/context.c
+++ b/manager/context.c
@@ -47,11 +47,15 @@ static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
 {
bool failed = WIFSIGNALED(status) || !!WEXITSTATUS(status);
 
-   pr_debug("removing info %s from the list\n", info->mnt.id);
+   pr_debug("removing info %s from the list (replacer pid %d)\n",
+ info->mnt.id, info->replacer);
 
-   if (failed)
+   if (failed) {
/* SPFS master was failed. We need to release the reference */
spfs_release_mnt(info);
+   if (info->replacer > 0 && kill(info->replacer, SIGKILL))
+   pr_perror("Failed to kill replacer");
+   }
 
info->dead = true;
del_spfs_info(ctx->spfs_mounts, info);
@@ -88,6 +92,7 @@ static void sigchld_handler(int signal, siginfo_t *siginfo, void *data)
 * corresponding fd.
 */
spfs_release_mnt(info);
+   info->replacer = -1;
} else {
info = find_spfs_by_pid(ctx->spfs_mounts, pid);
if (info) {
diff --git a/manager/spfs.c b/manager/spfs.c
index 99845b1..7ee582f 100644
--- a/manager/spfs.c
+++ b/manager/spfs.c
@@ -107,6 +107,7 @@ int create_spfs_info(const char *id,
INIT_LIST_HEAD(&info->processes);
 
info->mode = SPFS_REPLACE_MODE_HOLD;
+   info->replacer = -1;
 
*i = info;
 



Re: [Devel] [PATCH v3 1/2] spfs: Handle non-zero exit status in cleanup_spfs_mount()

2018-01-23 Thread Stanislav Kinsburskiy
Looks good to me

Acked-by: Stanislav Kinsburskiy 


23.01.2018 11:43, Kirill Tkhai пишет:
> Consider non-zero exit status of spfs similar to killed
> status and do the same cleanups.
> 
> v3: New
> 
> Signed-off-by: Kirill Tkhai 
> ---
>  manager/context.c |8 
>  manager/spfs.c|8 
>  2 files changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/manager/context.c b/manager/context.c
> index 1eb37c9..73d5ada 100644
> --- a/manager/context.c
> +++ b/manager/context.c
> @@ -45,12 +45,12 @@ const char *mgr_ovz_id(void)
>  static void cleanup_spfs_mount(struct spfs_manager_context_s *ctx,
>  struct spfs_info_s *info, int status)
>  {
> - bool killed = WIFSIGNALED(status);
> + bool failed = WIFSIGNALED(status) || !!WEXITSTATUS(status);
>  
>   pr_debug("removing info %s from the list\n", info->mnt.id);
>  
> - if (killed)
> - /* SPFS master was killed. We need to release the reference */
> + if (failed)
> + /* SPFS master was failed. We need to release the reference */
>   spfs_release_mnt(info);
>  
>   info->dead = true;
> @@ -59,7 +59,7 @@ static void cleanup_spfs_mount(struct 
> spfs_manager_context_s *ctx,
>   if (unlink(info->socket_path))
>   pr_perror("failed to unlink %s", info->socket_path);
>  
> - spfs_cleanup_env(info, killed);
> + spfs_cleanup_env(info, failed);
>  
>   close_namespaces(info->ns_fds);
>  }
> diff --git a/manager/spfs.c b/manager/spfs.c
> index 3e0f667..99845b1 100644
> --- a/manager/spfs.c
> +++ b/manager/spfs.c
> @@ -409,9 +409,9 @@ int spfs_prepare_env(struct spfs_info_s *info, const char 
> *proxy_dir)
>   return err ? err : res;
>  }
>  
> -static int __spfs_cleanup_env(struct spfs_info_s *info, bool killed)
> +static int __spfs_cleanup_env(struct spfs_info_s *info, bool failed)
>  {
> - if (killed && umount(info->work_dir)) {
> + if (failed && umount(info->work_dir)) {
>   pr_perror("failed to umount %s", info->work_dir);
>   return -errno;
>   }
> @@ -423,7 +423,7 @@ static int __spfs_cleanup_env(struct spfs_info_s *info, 
> bool killed)
>   return 0;
>  }
>  
> -int spfs_cleanup_env(struct spfs_info_s *info, bool killed)
> +int spfs_cleanup_env(struct spfs_info_s *info, bool failed)
>  {
>   int err, res;
>   unsigned orig_ns_mask;
> @@ -432,7 +432,7 @@ int spfs_cleanup_env(struct spfs_info_s *info, bool 
> killed)
>   if (res)
>   return res;
>  
> - err = __spfs_cleanup_env(info, killed);
> + err = __spfs_cleanup_env(info, failed);
>  
>   res = leave_spfs_context(info, orig_ns_mask);
>  
> 


Re: [Devel] [PATCH v3 1/2] tcache: Refactor tcache_shrink_scan()

2018-01-23 Thread Andrey Ryabinin


On 01/23/2018 11:55 AM, Kirill Tkhai wrote:
> Make the function have a single return point.
> 
> Signed-off-by: Kirill Tkhai 

Acked-by: Andrey Ryabinin 


Re: [Devel] [PATCH v3 2/2] tcache: Fix race between tcache_invalidate_node() and tcache_attach_page()

2018-01-23 Thread Andrey Ryabinin


On 01/23/2018 11:56 AM, Kirill Tkhai wrote:
> tcache_attach_page()            tcache_invalidate_node()
> ..                              __tcache_lookup_node()
> ..                              __tcache_delete_node()
> Check node->invalidated         ..
> tcache_page_tree_insert()       ..
> tcache_lru_add()                ..
> ..                              tcache_invalidate_node_pages()
> ..                                node->invalidated = true
> 
> Check nr_page to determ if there is a race and repeat
> node pages iterations if so.
> 
> v2: Move invalidate assignment down in tcache_invalidate_node_tree().
> v3: Synchronize sched in case of race with tcache_shrink_count() too
> to minimize repeats numbers.
> 
> Signed-off-by: Kirill Tkhai 
> ---


Acked-by: Andrey Ryabinin 