[Patch] tmpfs accounting cleanup for -ac series

2001-05-17 Thread Christoph Rohland

Hi Alan,

While looking at the -ac version of ramfs I noticed that there is a
new address operation introduced which I can use to cleanup shmem.

This patch throws away some magic recalculation and makes the
accounting of shmem accurate.

It also encapsulates all accesses to the superblock_info into a macro.

The patch is on top of my previous ones.

Greetings
Christoph

diff -uNr 4-ac9/fs/proc/proc_misc.c c/fs/proc/proc_misc.c
--- 4-ac9/fs/proc/proc_misc.c   Thu May 17 13:17:37 2001
+++ c/fs/proc/proc_misc.c   Thu May 17 13:11:30 2001
@@ -140,17 +140,9 @@
 {
struct sysinfo i;
int len;
-   unsigned int cached, shmem;
+   unsigned int cached;
 
-   /*
-* There may be some inconsistency because shmem_nrpages
-* update is delayed to page_cache_size
-* We make sure the cached value does not get below zero 
-*/
-   cached = atomic_read(&page_cache_size);
-   shmem  = atomic_read(&shmem_nrpages);
-   if (shmem < cached)
-   cached -= shmem;
+   cached = atomic_read(&page_cache_size) - atomic_read(&shmem_nrpages);
 
 /*
  * display in kilobytes.
diff -uNr 4-ac9/mm/mmap.c c/mm/mmap.c
--- 4-ac9/mm/mmap.c Thu May 17 13:17:37 2001
+++ c/mm/mmap.c Thu May 17 10:54:22 2001
@@ -56,24 +56,14 @@
 */
 
long free;
-   unsigned long cached, shmem;
-
-   /*
-* There may be some inconsistency because shmem_nrpages
-* update is delayed to the page_cache_size
-* We make sure the cached value does not get below zero 
-*/
-   cached = atomic_read(&page_cache_size);
-   shmem  = atomic_read(&shmem_nrpages);
-   if (cached > shmem)
-   cached -= shmem;
 
 /* Sometimes we want to use more memory than we have. */
if (sysctl_overcommit_memory)
return 1;
 
free = atomic_read(&buffermem_pages);
-   free += cached;
+   free += atomic_read(&page_cache_size) ;
+   free -= atomic_read(&shmem_nrpages);
free += nr_free_pages();
free += nr_swap_pages;
 
diff -uNr 4-ac9/mm/shmem.c c/mm/shmem.c
--- 4-ac9/mm/shmem.c	Thu May 17 13:17:37 2001
+++ c/mm/shmem.c	Thu May 17 10:54:03 2001
@@ -35,6 +35,8 @@
 
 #define ENTRIES_PER_PAGE (PAGE_SIZE/sizeof(unsigned long))
 
+#define SHMEM_SB(sb) (&sb->u.shmem_sb)
+
 static struct super_operations shmem_ops;
 static struct address_space_operations shmem_aops;
 static struct file_operations shmem_file_operations;
@@ -50,44 +52,6 @@
 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
 
 /*
- * shmem_recalc_inode - recalculate the size of an inode
- *
- * @inode: inode to recalc
- * @swap:  additional swap pages freed externally
- *
- * We have to calculate the free blocks since the mm can drop pages
- * behind our back
- *
- * But we know that normally
- * inodes->i_blocks/BLOCKS_PER_PAGE == 
- * inode->i_mapping->nrpages + info->swapped
- *
- * So the mm freed 
- * inodes->i_blocks/BLOCKS_PER_PAGE - 
- * (inode->i_mapping->nrpages + info->swapped)
- *
- * It has to be called with the spinlock held.
- *
- * The swap parameter is a performance hack for truncate.
- */
-
-static void shmem_recalc_inode(struct inode * inode, unsigned long swap)
-{
-   unsigned long freed;
-
-   freed = (inode->i_blocks/BLOCKS_PER_PAGE) -
-   (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped);
-   if (freed){
-   struct shmem_sb_info * info = &inode->i_sb->u.shmem_sb;
-   inode->i_blocks -= freed*BLOCKS_PER_PAGE;
-   spin_lock (&info->stat_lock);
-   info->free_blocks += freed;
-   spin_unlock (&info->stat_lock);
-   atomic_sub(freed-swap, &shmem_nrpages);
-   }
-}
-
-/*
  * shmem_swp_entry - find the swap vector position in the info structure
  *
  * @info:  info structure for the inode
@@ -318,6 +282,7 @@
unsigned long index;
unsigned long freed = 0;
struct shmem_inode_info * info = SHMEM_I(inode);
+   struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
 
down(&info->sem);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -328,14 +293,28 @@
freed += shmem_truncate_indirect(info, index);
 
info->swapped -= freed;
-   shmem_recalc_inode(inode, freed);
+   spin_lock(&sbinfo->stat_lock);
+   sbinfo->free_blocks += freed;
+   spin_unlock(&sbinfo->stat_lock);
spin_unlock (&info->lock);
up(&info->sem);
 }
 
+static void shmem_truncatepage(struct page *page)
+{
+   struct inode *inode = (struct inode *)page->mapping->host;
+   struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
+
+   inode->i_blocks -= BLOCKS_PER_PAGE;
+   spin_lock (&sbinfo->stat_lock);
+   sbinfo->free_blocks++;
+   spin_unlock (&sbinfo->stat_lock);
+   atomic_dec(&shmem_nrpages);
+}
+
 static void shmem_delete_inode(struct inode * inode)
 {
-   struct shmem_sb_info *info = &inode->i_sb->u.shmem_sb;
+   struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 
inode->i_size = 0;

[Patch] tmpfs accounting cleanup for -ac series

2001-05-17 Thread Christoph Rohland

Hi Alan,

While looking at the -ac version of ramfs I noticed that there is a
new address operation introduced which I can use to cleanup shmem.

This patch throws away some magic recalculation and makes the
accounting of shmem accurate.

It also encapsulates all accesses to the superblock_info into a macro.

The patch is on top of my previous ones.

Greetings
Christoph

diff -uNr 4-ac9/fs/proc/proc_misc.c c/fs/proc/proc_misc.c
--- 4-ac9/fs/proc/proc_misc.c   Thu May 17 13:17:37 2001
+++ c/fs/proc/proc_misc.c   Thu May 17 13:11:30 2001
@@ -140,17 +140,9 @@
 {
struct sysinfo i;
int len;
-   unsigned int cached, shmem;
+   unsigned int cached;
 
-   /*
-* There may be some inconsistency because shmem_nrpages
-* update is delayed to page_cache_size
-* We make sure the cached value does not get below zero 
-*/
-   cached = atomic_read(&page_cache_size);
-   shmem  = atomic_read(&shmem_nrpages);
-   if (shmem < cached)
-   cached -= shmem;
+   cached = atomic_read(&page_cache_size) - atomic_read(&shmem_nrpages);
 
 /*
  * display in kilobytes.
diff -uNr 4-ac9/mm/mmap.c c/mm/mmap.c
--- 4-ac9/mm/mmap.c Thu May 17 13:17:37 2001
+++ c/mm/mmap.c Thu May 17 10:54:22 2001
@@ -56,24 +56,14 @@
 */
 
long free;
-   unsigned long cached, shmem;
-
-   /*
-* There may be some inconsistency because shmem_nrpages
-* update is delayed to the page_cache_size
-* We make sure the cached value does not get below zero 
-*/
-   cached = atomic_read(&page_cache_size);
-   shmem  = atomic_read(&shmem_nrpages);
-   if (cached > shmem)
-   cached -= shmem;
 
 /* Sometimes we want to use more memory than we have. */
if (sysctl_overcommit_memory)
return 1;
 
free = atomic_read(&buffermem_pages);
-   free += cached;
+   free += atomic_read(&page_cache_size) ;
+   free -= atomic_read(&shmem_nrpages);
free += nr_free_pages();
free += nr_swap_pages;
 
diff -uNr 4-ac9/mm/shmem.c c/mm/shmem.c
--- 4-ac9/mm/shmem.c	Thu May 17 13:17:37 2001
+++ c/mm/shmem.c	Thu May 17 10:54:03 2001
@@ -35,6 +35,8 @@
 
 #define ENTRIES_PER_PAGE (PAGE_SIZE/sizeof(unsigned long))
 
+#define SHMEM_SB(sb) (&sb->u.shmem_sb)
+
 static struct super_operations shmem_ops;
 static struct address_space_operations shmem_aops;
 static struct file_operations shmem_file_operations;
@@ -50,44 +52,6 @@
 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
 
 /*
- * shmem_recalc_inode - recalculate the size of an inode
- *
- * @inode: inode to recalc
- * @swap:  additional swap pages freed externally
- *
- * We have to calculate the free blocks since the mm can drop pages
- * behind our back
- *
- * But we know that normally
- * inodes->i_blocks/BLOCKS_PER_PAGE == 
- * inode->i_mapping->nrpages + info->swapped
- *
- * So the mm freed 
- * inodes->i_blocks/BLOCKS_PER_PAGE - 
- * (inode->i_mapping->nrpages + info->swapped)
- *
- * It has to be called with the spinlock held.
- *
- * The swap parameter is a performance hack for truncate.
- */
-
-static void shmem_recalc_inode(struct inode * inode, unsigned long swap)
-{
-   unsigned long freed;
-
-   freed = (inode->i_blocks/BLOCKS_PER_PAGE) -
-   (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped);
-   if (freed){
-   struct shmem_sb_info * info = &inode->i_sb->u.shmem_sb;
-   inode->i_blocks -= freed*BLOCKS_PER_PAGE;
-   spin_lock (&info->stat_lock);
-   info->free_blocks += freed;
-   spin_unlock (&info->stat_lock);
-   atomic_sub(freed-swap, &shmem_nrpages);
-   }
-}
-
-/*
  * shmem_swp_entry - find the swap vector position in the info structure
  *
  * @info:  info structure for the inode
@@ -318,6 +282,7 @@
unsigned long index;
unsigned long freed = 0;
struct shmem_inode_info * info = SHMEM_I(inode);
+   struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
 
down(&info->sem);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -328,14 +293,28 @@
freed += shmem_truncate_indirect(info, index);
 
info->swapped -= freed;
-   shmem_recalc_inode(inode, freed);
+   spin_lock(&sbinfo->stat_lock);
+   sbinfo->free_blocks += freed;
+   spin_unlock(&sbinfo->stat_lock);
spin_unlock (&info->lock);
up(&info->sem);
 }
 
+static void shmem_truncatepage(struct page *page)
+{
+   struct inode *inode = (struct inode *)page->mapping->host;
+   struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
+
+   inode->i_blocks -= BLOCKS_PER_PAGE;
+   spin_lock (&sbinfo->stat_lock);
+   sbinfo->free_blocks++;
+   spin_unlock (&sbinfo->stat_lock);
+   atomic_dec(&shmem_nrpages);
+}
+
 static void shmem_delete_inode(struct inode * inode)
 {
-   struct shmem_sb_info *info = inode-i_sb-u.shmem_sb;
+