Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-03-27 Thread Vlastimil Babka
On 02/26/2015 03:59 PM, Jerome Marchand wrote:
> On 02/26/2015 02:51 PM, Vlastimil Babka wrote:
>>  
>> +/* Optimized variant when page is already known not to be PageAnon */
>> +static inline int mm_counter_file(struct page *page)
> 
> Just a nitpick, but I don't like that name as it keeps the confusion we
> currently have between shmem and file backed pages. I'm not sure what
> other name to use though. mm_counter_shared() maybe? I'm not sure it is
> less confusing...

I think that's also confusing, but differently. Didn't come up with better name,
so leaving as it is for v2.

Thanks

> Jerome
> 

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-03-27 Thread Vlastimil Babka
On 02/26/2015 03:59 PM, Jerome Marchand wrote:
 On 02/26/2015 02:51 PM, Vlastimil Babka wrote:
  
 +/* Optimized variant when page is already known not to be PageAnon */
 +static inline int mm_counter_file(struct page *page)
 
 Just a nitpick, but I don't like that name as it keeps the confusion we
 currently have between shmem and file backed pages. I'm not sure what
 other name to use though. mm_counter_shared() maybe? I'm not sure it is
 less confusing...

I think that's also confusing, but differently. Didn't come up with better name,
so leaving as it is for v2.

Thanks

 Jerome
 

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-27 Thread Michael Kerrisk
[CC += linux-api@]

On Thu, Feb 26, 2015 at 2:51 PM, Vlastimil Babka  wrote:
> From: Jerome Marchand 
>
> Currently looking at /proc//status or statm, there is no way to
> distinguish shmem pages from pages mapped to a regular file (shmem
> pages are mapped to /dev/zero), even though their implication in
> actual memory use is quite different.
> This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
> shmem pages instead of MM_FILEPAGES.
>
> [vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
> Signed-off-by: Jerome Marchand 
> Signed-off-by: Vlastimil Babka 
> ---
>  arch/s390/mm/pgtable.c   |  5 +
>  fs/proc/task_mmu.c   |  4 +++-
>  include/linux/mm.h   | 28 
>  include/linux/mm_types.h |  9 ++---
>  kernel/events/uprobes.c  |  2 +-
>  mm/memory.c  | 30 ++
>  mm/oom_kill.c|  5 +++--
>  mm/rmap.c| 15 ---
>  8 files changed, 56 insertions(+), 42 deletions(-)
>
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index b2c1542..5bffd5d 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, 
> struct mm_struct *mm)
> else if (is_migration_entry(entry)) {
> struct page *page = migration_entry_to_page(entry);
>
> -   if (PageAnon(page))
> -   dec_mm_counter(mm, MM_ANONPAGES);
> -   else
> -   dec_mm_counter(mm, MM_FILEPAGES);
> +   dec_mm_counter(mm, mm_counter(page));
> }
> free_swap_and_cache(entry);
>  }
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 0410309..d70334c 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
>  unsigned long *shared, unsigned long *text,
>  unsigned long *data, unsigned long *resident)
>  {
> -   *shared = get_mm_counter(mm, MM_FILEPAGES);
> +   *shared = get_mm_counter(mm, MM_FILEPAGES) +
> +   get_mm_counter(mm, MM_SHMEMPAGES);
> *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
> >> PAGE_SHIFT;
> *data = mm->total_vm - mm->shared_vm;
> @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
> addr,
> pte_none(*pte) && vma->vm_file) {
> struct address_space *mapping =
> file_inode(vma->vm_file)->i_mapping;
> +   pgoff_t pgoff = linear_page_index(vma, addr);
>
> /*
>  * shmem does not use swap pte's so we have to consult
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 47a9392..adfbb5b 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
> mm_struct *mm, int member)
> return (unsigned long)val;
>  }
>
> +/* A wrapper for the CONFIG_SHMEM dependent counter */
> +static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
> +{
> +#ifdef CONFIG_SHMEM
> +   return get_mm_counter(mm, MM_SHMEMPAGES);
> +#else
> +   return 0;
> +#endif
> +}
> +
>  static inline void add_mm_counter(struct mm_struct *mm, int member, long 
> value)
>  {
> atomic_long_add(value, &mm->rss_stat.count[member]);
> @@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct 
> *mm, int member)
> atomic_long_dec(&mm->rss_stat.count[member]);
>  }
>
> +/* Optimized variant when page is already known not to be PageAnon */
> +static inline int mm_counter_file(struct page *page)
> +{
> +#ifdef CONFIG_SHMEM
> +   if (PageSwapBacked(page))
> +   return MM_SHMEMPAGES;
> +#endif
> +   return MM_FILEPAGES;
> +}
> +
> +static inline int mm_counter(struct page *page)
> +{
> +   if (PageAnon(page))
> +   return MM_ANONPAGES;
> +   return mm_counter_file(page);
> +}
> +
>  static inline unsigned long get_mm_rss(struct mm_struct *mm)
>  {
> return get_mm_counter(mm, MM_FILEPAGES) +
> +   get_mm_counter_shmem(mm) +
> get_mm_counter(mm, MM_ANONPAGES);
>  }
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 199a03a..d3c2372 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -327,9 +327,12 @@ struct core_state {
>  };
>
>  enum {
> -   MM_FILEPAGES,
> -   MM_ANONPAGES,
> -   MM_SWAPENTS,
> +   MM_FILEPAGES,   /* Resident file mapping pages */
> +   MM_ANONPAGES,   /* Resident anonymous pages */
> +   MM_SWAPENTS,/* Anonymous swap entries */
> +#ifdef CONFIG_SHMEM
> +   MM_SHMEMPAGES,  /* Resident shared memory pages */
> +#endif
> NR_MM_COUNTERS
>  };
>
> diff 

Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-27 Thread Michael Kerrisk
[CC += linux-api@]

On Thu, Feb 26, 2015 at 2:51 PM, Vlastimil Babka vba...@suse.cz wrote:
 From: Jerome Marchand jmarc...@redhat.com

 Currently looking at /proc/pid/status or statm, there is no way to
 distinguish shmem pages from pages mapped to a regular file (shmem
 pages are mapped to /dev/zero), even though their implication in
 actual memory use is quite different.
 This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
 shmem pages instead of MM_FILEPAGES.

 [vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
 Signed-off-by: Jerome Marchand jmarc...@redhat.com
 Signed-off-by: Vlastimil Babka vba...@suse.cz
 ---
  arch/s390/mm/pgtable.c   |  5 +
  fs/proc/task_mmu.c   |  4 +++-
  include/linux/mm.h   | 28 
  include/linux/mm_types.h |  9 ++---
  kernel/events/uprobes.c  |  2 +-
  mm/memory.c  | 30 ++
  mm/oom_kill.c|  5 +++--
  mm/rmap.c| 15 ---
  8 files changed, 56 insertions(+), 42 deletions(-)

 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
 index b2c1542..5bffd5d 100644
 --- a/arch/s390/mm/pgtable.c
 +++ b/arch/s390/mm/pgtable.c
 @@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, 
 struct mm_struct *mm)
 else if (is_migration_entry(entry)) {
 struct page *page = migration_entry_to_page(entry);

 -   if (PageAnon(page))
 -   dec_mm_counter(mm, MM_ANONPAGES);
 -   else
 -   dec_mm_counter(mm, MM_FILEPAGES);
 +   dec_mm_counter(mm, mm_counter(page));
 }
 free_swap_and_cache(entry);
  }
 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
 index 0410309..d70334c 100644
 --- a/fs/proc/task_mmu.c
 +++ b/fs/proc/task_mmu.c
 @@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
  unsigned long *shared, unsigned long *text,
  unsigned long *data, unsigned long *resident)
  {
 -   *shared = get_mm_counter(mm, MM_FILEPAGES);
 +   *shared = get_mm_counter(mm, MM_FILEPAGES) +
 +   get_mm_counter(mm, MM_SHMEMPAGES);
 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  >> PAGE_SHIFT;
 *data = mm->total_vm - mm->shared_vm;
 @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
 addr,
 pte_none(*pte) && vma->vm_file) {
 struct address_space *mapping =
 file_inode(vma->vm_file)->i_mapping;
 +   pgoff_t pgoff = linear_page_index(vma, addr);

 /*
  * shmem does not use swap pte's so we have to consult
 diff --git a/include/linux/mm.h b/include/linux/mm.h
 index 47a9392..adfbb5b 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
 @@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
 mm_struct *mm, int member)
 return (unsigned long)val;
  }

 +/* A wrapper for the CONFIG_SHMEM dependent counter */
 +static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
 +{
 +#ifdef CONFIG_SHMEM
 +   return get_mm_counter(mm, MM_SHMEMPAGES);
 +#else
 +   return 0;
 +#endif
 +}
 +
  static inline void add_mm_counter(struct mm_struct *mm, int member, long 
 value)
  {
 atomic_long_add(value, &mm->rss_stat.count[member]);
 @@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct 
 *mm, int member)
 atomic_long_dec(&mm->rss_stat.count[member]);
  }

 +/* Optimized variant when page is already known not to be PageAnon */
 +static inline int mm_counter_file(struct page *page)
 +{
 +#ifdef CONFIG_SHMEM
 +   if (PageSwapBacked(page))
 +   return MM_SHMEMPAGES;
 +#endif
 +   return MM_FILEPAGES;
 +}
 +
 +static inline int mm_counter(struct page *page)
 +{
 +   if (PageAnon(page))
 +   return MM_ANONPAGES;
 +   return mm_counter_file(page);
 +}
 +
  static inline unsigned long get_mm_rss(struct mm_struct *mm)
  {
 return get_mm_counter(mm, MM_FILEPAGES) +
 +   get_mm_counter_shmem(mm) +
 get_mm_counter(mm, MM_ANONPAGES);
  }

 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
 index 199a03a..d3c2372 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
 @@ -327,9 +327,12 @@ struct core_state {
  };

  enum {
 -   MM_FILEPAGES,
 -   MM_ANONPAGES,
 -   MM_SWAPENTS,
 +   MM_FILEPAGES,   /* Resident file mapping pages */
 +   MM_ANONPAGES,   /* Resident anonymous pages */
 +   MM_SWAPENTS,/* Anonymous swap entries */
 +#ifdef CONFIG_SHMEM
 +   MM_SHMEMPAGES,  /* Resident shared memory pages */
 +#endif
 NR_MM_COUNTERS
  };

 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
 index 

Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Hillf Danton
> @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
> addr,
>   pte_none(*pte) && vma->vm_file) {
>   struct address_space *mapping =
>   file_inode(vma->vm_file)->i_mapping;
> + pgoff_t pgoff = linear_page_index(vma, addr);
> 
>   /*
>* shmem does not use swap pte's so we have to consult

This hunk should go to patch 2/4
Hillf

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Jerome Marchand
On 02/26/2015 02:51 PM, Vlastimil Babka wrote:
> From: Jerome Marchand 
> 
> Currently looking at /proc//status or statm, there is no way to
> distinguish shmem pages from pages mapped to a regular file (shmem
> pages are mapped to /dev/zero), even though their implication in
> actual memory use is quite different.
> This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
> shmem pages instead of MM_FILEPAGES.
> 
> [vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
> Signed-off-by: Jerome Marchand 
> Signed-off-by: Vlastimil Babka 
> ---
>  arch/s390/mm/pgtable.c   |  5 +
>  fs/proc/task_mmu.c   |  4 +++-
>  include/linux/mm.h   | 28 
>  include/linux/mm_types.h |  9 ++---
>  kernel/events/uprobes.c  |  2 +-
>  mm/memory.c  | 30 ++
>  mm/oom_kill.c|  5 +++--
>  mm/rmap.c| 15 ---
>  8 files changed, 56 insertions(+), 42 deletions(-)
> 
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index b2c1542..5bffd5d 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, 
> struct mm_struct *mm)
>   else if (is_migration_entry(entry)) {
>   struct page *page = migration_entry_to_page(entry);
>  
> - if (PageAnon(page))
> - dec_mm_counter(mm, MM_ANONPAGES);
> - else
> - dec_mm_counter(mm, MM_FILEPAGES);
> + dec_mm_counter(mm, mm_counter(page));
>   }
>   free_swap_and_cache(entry);
>  }
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 0410309..d70334c 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
>unsigned long *shared, unsigned long *text,
>unsigned long *data, unsigned long *resident)
>  {
> - *shared = get_mm_counter(mm, MM_FILEPAGES);
> + *shared = get_mm_counter(mm, MM_FILEPAGES) +
> + get_mm_counter(mm, MM_SHMEMPAGES);
>   *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>   >> PAGE_SHIFT;
>   *data = mm->total_vm - mm->shared_vm;
> @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
> addr,
>   pte_none(*pte) && vma->vm_file) {
>   struct address_space *mapping =
>   file_inode(vma->vm_file)->i_mapping;
> + pgoff_t pgoff = linear_page_index(vma, addr);
>  
>   /*
>* shmem does not use swap pte's so we have to consult
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 47a9392..adfbb5b 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
> mm_struct *mm, int member)
>   return (unsigned long)val;
>  }
>  
> +/* A wrapper for the CONFIG_SHMEM dependent counter */
> +static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
> +{
> +#ifdef CONFIG_SHMEM
> + return get_mm_counter(mm, MM_SHMEMPAGES);
> +#else
> + return 0;
> +#endif
> +}
> +
>  static inline void add_mm_counter(struct mm_struct *mm, int member, long 
> value)
>  {
>   atomic_long_add(value, &mm->rss_stat.count[member]);
> @@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct 
> *mm, int member)
>   atomic_long_dec(&mm->rss_stat.count[member]);
>  }
>  
> +/* Optimized variant when page is already known not to be PageAnon */
> +static inline int mm_counter_file(struct page *page)

Just a nitpick, but I don't like that name as it keeps the confusion we
currently have between shmem and file backed pages. I'm not sure what
other name to use though. mm_counter_shared() maybe? I'm not sure it is
less confusing...

Jerome

> +{
> +#ifdef CONFIG_SHMEM
> + if (PageSwapBacked(page))
> + return MM_SHMEMPAGES;
> +#endif
> + return MM_FILEPAGES;
> +}
> +
> +static inline int mm_counter(struct page *page)
> +{
> + if (PageAnon(page))
> + return MM_ANONPAGES;
> + return mm_counter_file(page);
> +}
> +
>  static inline unsigned long get_mm_rss(struct mm_struct *mm)
>  {
>   return get_mm_counter(mm, MM_FILEPAGES) +
> + get_mm_counter_shmem(mm) +
>   get_mm_counter(mm, MM_ANONPAGES);
>  }
>  
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 199a03a..d3c2372 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -327,9 +327,12 @@ struct core_state {
>  };
>  
>  enum {
> - MM_FILEPAGES,
> - MM_ANONPAGES,
> - MM_SWAPENTS,
> + MM_FILEPAGES,   /* Resident file mapping pages */
> + MM_ANONPAGES,   /* Resident anonymous pages */
> + MM_SWAPENTS,/* Anonymous swap entries 

[PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Vlastimil Babka
From: Jerome Marchand 

Currently looking at /proc//status or statm, there is no way to
distinguish shmem pages from pages mapped to a regular file (shmem
pages are mapped to /dev/zero), even though their implication in
actual memory use is quite different.
This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
shmem pages instead of MM_FILEPAGES.

[vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
Signed-off-by: Jerome Marchand 
Signed-off-by: Vlastimil Babka 
---
 arch/s390/mm/pgtable.c   |  5 +
 fs/proc/task_mmu.c   |  4 +++-
 include/linux/mm.h   | 28 
 include/linux/mm_types.h |  9 ++---
 kernel/events/uprobes.c  |  2 +-
 mm/memory.c  | 30 ++
 mm/oom_kill.c|  5 +++--
 mm/rmap.c| 15 ---
 8 files changed, 56 insertions(+), 42 deletions(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542..5bffd5d 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct 
mm_struct *mm)
else if (is_migration_entry(entry)) {
struct page *page = migration_entry_to_page(entry);
 
-   if (PageAnon(page))
-   dec_mm_counter(mm, MM_ANONPAGES);
-   else
-   dec_mm_counter(mm, MM_FILEPAGES);
+   dec_mm_counter(mm, mm_counter(page));
}
free_swap_and_cache(entry);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0410309..d70334c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
 unsigned long *shared, unsigned long *text,
 unsigned long *data, unsigned long *resident)
 {
-   *shared = get_mm_counter(mm, MM_FILEPAGES);
+   *shared = get_mm_counter(mm, MM_FILEPAGES) +
+   get_mm_counter(mm, MM_SHMEMPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
@@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
pte_none(*pte) && vma->vm_file) {
struct address_space *mapping =
file_inode(vma->vm_file)->i_mapping;
+   pgoff_t pgoff = linear_page_index(vma, addr);
 
/*
 * shmem does not use swap pte's so we have to consult
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a9392..adfbb5b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
mm_struct *mm, int member)
return (unsigned long)val;
 }
 
+/* A wrapper for the CONFIG_SHMEM dependent counter */
+static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
+{
+#ifdef CONFIG_SHMEM
+   return get_mm_counter(mm, MM_SHMEMPAGES);
+#else
+   return 0;
+#endif
+}
+
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
 atomic_long_add(value, &mm->rss_stat.count[member]);
@@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct *mm, 
int member)
 atomic_long_dec(&mm->rss_stat.count[member]);
 }
 
+/* Optimized variant when page is already known not to be PageAnon */
+static inline int mm_counter_file(struct page *page)
+{
+#ifdef CONFIG_SHMEM
+   if (PageSwapBacked(page))
+   return MM_SHMEMPAGES;
+#endif
+   return MM_FILEPAGES;
+}
+
+static inline int mm_counter(struct page *page)
+{
+   if (PageAnon(page))
+   return MM_ANONPAGES;
+   return mm_counter_file(page);
+}
+
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
 {
return get_mm_counter(mm, MM_FILEPAGES) +
+   get_mm_counter_shmem(mm) +
get_mm_counter(mm, MM_ANONPAGES);
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 199a03a..d3c2372 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -327,9 +327,12 @@ struct core_state {
 };
 
 enum {
-   MM_FILEPAGES,
-   MM_ANONPAGES,
-   MM_SWAPENTS,
+   MM_FILEPAGES,   /* Resident file mapping pages */
+   MM_ANONPAGES,   /* Resident anonymous pages */
+   MM_SWAPENTS,/* Anonymous swap entries */
+#ifdef CONFIG_SHMEM
+   MM_SHMEMPAGES,  /* Resident shared memory pages */
+#endif
NR_MM_COUNTERS
 };
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index cb346f2..0a08fdd 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -188,7 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, 
unsigned long addr,
lru_cache_add_active_or_unevictable(kpage, vma);
 
if (!PageAnon(page)) {
-

Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Jerome Marchand
On 02/26/2015 02:51 PM, Vlastimil Babka wrote:
 From: Jerome Marchand jmarc...@redhat.com
 
 Currently looking at /proc/pid/status or statm, there is no way to
 distinguish shmem pages from pages mapped to a regular file (shmem
 pages are mapped to /dev/zero), even though their implication in
 actual memory use is quite different.
 This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
 shmem pages instead of MM_FILEPAGES.
 
 [vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
 Signed-off-by: Jerome Marchand jmarc...@redhat.com
 Signed-off-by: Vlastimil Babka vba...@suse.cz
 ---
  arch/s390/mm/pgtable.c   |  5 +
  fs/proc/task_mmu.c   |  4 +++-
  include/linux/mm.h   | 28 
  include/linux/mm_types.h |  9 ++---
  kernel/events/uprobes.c  |  2 +-
  mm/memory.c  | 30 ++
  mm/oom_kill.c|  5 +++--
  mm/rmap.c| 15 ---
  8 files changed, 56 insertions(+), 42 deletions(-)
 
 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
 index b2c1542..5bffd5d 100644
 --- a/arch/s390/mm/pgtable.c
 +++ b/arch/s390/mm/pgtable.c
 @@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, 
 struct mm_struct *mm)
   else if (is_migration_entry(entry)) {
   struct page *page = migration_entry_to_page(entry);
  
 - if (PageAnon(page))
 - dec_mm_counter(mm, MM_ANONPAGES);
 - else
 - dec_mm_counter(mm, MM_FILEPAGES);
 + dec_mm_counter(mm, mm_counter(page));
   }
   free_swap_and_cache(entry);
  }
 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
 index 0410309..d70334c 100644
 --- a/fs/proc/task_mmu.c
 +++ b/fs/proc/task_mmu.c
 @@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
  {
 - *shared = get_mm_counter(mm, MM_FILEPAGES);
 + *shared = get_mm_counter(mm, MM_FILEPAGES) +
 + get_mm_counter(mm, MM_SHMEMPAGES);
   *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
   *data = mm->total_vm - mm->shared_vm;
 @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
 addr,
   pte_none(*pte) && vma->vm_file) {
   struct address_space *mapping =
   file_inode(vma->vm_file)->i_mapping;
 + pgoff_t pgoff = linear_page_index(vma, addr);
  
   /*
* shmem does not use swap pte's so we have to consult
 diff --git a/include/linux/mm.h b/include/linux/mm.h
 index 47a9392..adfbb5b 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
 @@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
 mm_struct *mm, int member)
   return (unsigned long)val;
  }
  
 +/* A wrapper for the CONFIG_SHMEM dependent counter */
 +static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
 +{
 +#ifdef CONFIG_SHMEM
 + return get_mm_counter(mm, MM_SHMEMPAGES);
 +#else
 + return 0;
 +#endif
 +}
 +
  static inline void add_mm_counter(struct mm_struct *mm, int member, long 
 value)
  {
   atomic_long_add(value, &mm->rss_stat.count[member]);
 @@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct 
 *mm, int member)
   atomic_long_dec(&mm->rss_stat.count[member]);
  }
  
 +/* Optimized variant when page is already known not to be PageAnon */
 +static inline int mm_counter_file(struct page *page)

Just a nitpick, but I don't like that name as it keeps the confusion we
currently have between shmem and file backed pages. I'm not sure what
other name to use though. mm_counter_shared() maybe? I'm not sure it is
less confusing...

Jerome

 +{
 +#ifdef CONFIG_SHMEM
 + if (PageSwapBacked(page))
 + return MM_SHMEMPAGES;
 +#endif
 + return MM_FILEPAGES;
 +}
 +
 +static inline int mm_counter(struct page *page)
 +{
 + if (PageAnon(page))
 + return MM_ANONPAGES;
 + return mm_counter_file(page);
 +}
 +
  static inline unsigned long get_mm_rss(struct mm_struct *mm)
  {
   return get_mm_counter(mm, MM_FILEPAGES) +
 + get_mm_counter_shmem(mm) +
   get_mm_counter(mm, MM_ANONPAGES);
  }
  
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
 index 199a03a..d3c2372 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
 @@ -327,9 +327,12 @@ struct core_state {
  };
  
  enum {
 - MM_FILEPAGES,
 - MM_ANONPAGES,
 - MM_SWAPENTS,
 + MM_FILEPAGES,   /* Resident file mapping pages */
 + MM_ANONPAGES,   /* Resident anonymous pages */
 + MM_SWAPENTS,/* Anonymous swap entries */
 +#ifdef CONFIG_SHMEM
 + MM_SHMEMPAGES,  /* Resident shared memory pages 

[PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Vlastimil Babka
From: Jerome Marchand jmarc...@redhat.com

Currently looking at /proc/pid/status or statm, there is no way to
distinguish shmem pages from pages mapped to a regular file (shmem
pages are mapped to /dev/zero), even though their implication in
actual memory use is quite different.
This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
shmem pages instead of MM_FILEPAGES.

[vba...@suse.cz: port to 4.0, add #ifdefs, mm_counter_file() variant]
Signed-off-by: Jerome Marchand jmarc...@redhat.com
Signed-off-by: Vlastimil Babka vba...@suse.cz
---
 arch/s390/mm/pgtable.c   |  5 +
 fs/proc/task_mmu.c   |  4 +++-
 include/linux/mm.h   | 28 
 include/linux/mm_types.h |  9 ++---
 kernel/events/uprobes.c  |  2 +-
 mm/memory.c  | 30 ++
 mm/oom_kill.c|  5 +++--
 mm/rmap.c| 15 ---
 8 files changed, 56 insertions(+), 42 deletions(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542..5bffd5d 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -617,10 +617,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct 
mm_struct *mm)
else if (is_migration_entry(entry)) {
struct page *page = migration_entry_to_page(entry);
 
-   if (PageAnon(page))
-   dec_mm_counter(mm, MM_ANONPAGES);
-   else
-   dec_mm_counter(mm, MM_FILEPAGES);
+   dec_mm_counter(mm, mm_counter(page));
}
free_swap_and_cache(entry);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0410309..d70334c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -81,7 +81,8 @@ unsigned long task_statm(struct mm_struct *mm,
 unsigned long *shared, unsigned long *text,
 unsigned long *data, unsigned long *resident)
 {
-   *shared = get_mm_counter(mm, MM_FILEPAGES);
+   *shared = get_mm_counter(mm, MM_FILEPAGES) +
+   get_mm_counter(mm, MM_SHMEMPAGES);
 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  >> PAGE_SHIFT;
 *data = mm->total_vm - mm->shared_vm;
@@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 pte_none(*pte) && vma->vm_file) {
 struct address_space *mapping =
 file_inode(vma->vm_file)->i_mapping;
+   pgoff_t pgoff = linear_page_index(vma, addr);
 
/*
 * shmem does not use swap pte's so we have to consult
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a9392..adfbb5b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1364,6 +1364,16 @@ static inline unsigned long get_mm_counter(struct 
mm_struct *mm, int member)
return (unsigned long)val;
 }
 
+/* A wrapper for the CONFIG_SHMEM dependent counter */
+static inline unsigned long get_mm_counter_shmem(struct mm_struct *mm)
+{
+#ifdef CONFIG_SHMEM
+   return get_mm_counter(mm, MM_SHMEMPAGES);
+#else
+   return 0;
+#endif
+}
+
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
 atomic_long_add(value, &mm->rss_stat.count[member]);
@@ -1379,9 +1389,27 @@ static inline void dec_mm_counter(struct mm_struct *mm, 
int member)
 atomic_long_dec(&mm->rss_stat.count[member]);
 }
 
+/* Optimized variant when page is already known not to be PageAnon */
+static inline int mm_counter_file(struct page *page)
+{
+#ifdef CONFIG_SHMEM
+   if (PageSwapBacked(page))
+   return MM_SHMEMPAGES;
+#endif
+   return MM_FILEPAGES;
+}
+
+static inline int mm_counter(struct page *page)
+{
+   if (PageAnon(page))
+   return MM_ANONPAGES;
+   return mm_counter_file(page);
+}
+
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
 {
return get_mm_counter(mm, MM_FILEPAGES) +
+   get_mm_counter_shmem(mm) +
get_mm_counter(mm, MM_ANONPAGES);
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 199a03a..d3c2372 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -327,9 +327,12 @@ struct core_state {
 };
 
 enum {
-   MM_FILEPAGES,
-   MM_ANONPAGES,
-   MM_SWAPENTS,
+   MM_FILEPAGES,   /* Resident file mapping pages */
+   MM_ANONPAGES,   /* Resident anonymous pages */
+   MM_SWAPENTS,/* Anonymous swap entries */
+#ifdef CONFIG_SHMEM
+   MM_SHMEMPAGES,  /* Resident shared memory pages */
+#endif
NR_MM_COUNTERS
 };
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index cb346f2..0a08fdd 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -188,7 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, 
unsigned long addr,

Re: [PATCH 3/4] mm, shmem: Add shmem resident memory accounting

2015-02-26 Thread Hillf Danton
 @@ -501,6 +502,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long 
 addr,
   pte_none(*pte) && vma->vm_file) {
   struct address_space *mapping =
   file_inode(vma->vm_file)->i_mapping;
 + pgoff_t pgoff = linear_page_index(vma, addr);
 
   /*
* shmem does not use swap pte's so we have to consult

This hunk should go to patch 2/4
Hillf

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/