Christoph Hellwig wrote:
On Thu, Aug 25, 2005 at 11:17:23AM +0200, Eric Dumazet wrote:

But that's not true.  You need to write your own sysctl handler for it,
probably worth adding a generic atomic_t sysctl handler while you're
at it.


I checked the linux-2.6.13-rc7 tree, and atomic_read() is just a wrapper that reads v->counter.


That doesn't matter.  atomic_t is an opaque type and you must use the
atomic_* interfaces to access it.

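(A side note on the atomic_t point: on i386 in this kernel the type and its reader are roughly

        typedef struct { volatile int counter; } atomic_t;
        #define atomic_read(v)          ((v)->counter)

but the type is meant to stay opaque, since other architectures may implement it differently.)

A generic atomic_t sysctl handler along the lines Christoph suggests could look roughly like the sketch below; the name proc_doatomic and the exact 2.6.13-era proc_handler signature are my assumptions, not code from this patch. The idea is to snapshot the counter into a temporary int and reuse proc_dointvec on a shadow table:

        /* Hypothetical sketch of a generic atomic_t sysctl handler.
         * table->data is expected to point at an atomic_t.
         */
        static int proc_doatomic(ctl_table *table, int write, struct file *filp,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
        {
                atomic_t *val = (atomic_t *)table->data;
                int tmp = atomic_read(val);     /* snapshot the counter */
                ctl_table shadow = *table;      /* present it as a plain int */
                int ret;

                shadow.data = &tmp;
                shadow.maxlen = sizeof(tmp);
                ret = proc_dointvec(&shadow, write, filp, buffer, lenp, ppos);
                if (ret == 0 && write)
                        atomic_set(val, tmp);   /* propagate sysctl writes */
                return ret;
        }

The patch below avoids the need for such a handler by mirroring the atomic counter into the existing nr_files int, so the current proc_dointvec entry keeps working.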
OK, here is a new clean patch that addresses this problem (nothing assumed about atomics).

This patch removes the filp_count_lock spinlock that was used to protect
files_stat.nr_files.

It introduces an atomic_t atomic_nr_files to keep the exact count, and mirrors its value into nr_files.

Forcing atomic_nr_files into the same cache line as nr_files makes sure we don't dirty two cache lines.

There is still a locked memory operation on SMP, but it saves an sti/cli pair.
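For context, the existing /proc/sys/fs/file-nr entry exports files_stat as plain ints through proc_dointvec (shown roughly as it appears in kernel/sysctl.c of this era), which is why mirroring the atomic counter into nr_files keeps it working without a new handler:

        {
                .ctl_name       = FS_NRFILE,
                .procname       = "file-nr",
                .data           = &files_stat,
                .maxlen         = 3*sizeof(int),
                .mode           = 0444,
                .proc_handler   = &proc_dointvec,
        },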

Thank you

Signed-off-by: Eric Dumazet <[EMAIL PROTECTED]>

diff -Nru linux-2.6.13-rc7/fs/file_table.c linux-2.6.13-rc7-ed/fs/file_table.c
--- linux-2.6.13-rc7/fs/file_table.c    2005-08-24 05:39:14.000000000 +0200
+++ linux-2.6.13-rc7-ed/fs/file_table.c 2005-08-25 12:18:02.000000000 +0200
@@ -19,38 +19,28 @@
 #include <linux/fsnotify.h>
 
 /* sysctl tunables... */
-struct files_stat_struct files_stat = {
-       .max_files = NR_FILE
-};
+struct files_stat_struct files_stat;
 
 EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
 
 /* public. Not pretty! */
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
 
-static DEFINE_SPINLOCK(filp_count_lock);
 
 /* slab constructors and destructors are called from arbitrary
- * context and must be fully threaded - use a local spinlock
- * to protect files_stat.nr_files
+ * context and must be fully threaded
  */
 void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
 {
        if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
-               unsigned long flags;
-               spin_lock_irqsave(&filp_count_lock, flags);
-               files_stat.nr_files++;
-               spin_unlock_irqrestore(&filp_count_lock, flags);
+               files_stat.nr_files = atomic_add_return(1, &files_stat.atomic_nr_files);
        }
 }
 
 void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
 {
-       unsigned long flags;
-       spin_lock_irqsave(&filp_count_lock, flags);
-       files_stat.nr_files--;
-       spin_unlock_irqrestore(&filp_count_lock, flags);
+       files_stat.nr_files = atomic_sub_return(1, &files_stat.atomic_nr_files);
 }
 
 static inline void file_free(struct file *f)
@@ -258,4 +248,5 @@
        files_stat.max_files = n; 
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
+       atomic_set(&files_stat.atomic_nr_files, 0);
 } 
diff -Nru linux-2.6.13-rc7/include/linux/fs.h linux-2.6.13-rc7-ed/include/linux/fs.h
--- linux-2.6.13-rc7/include/linux/fs.h 2005-08-24 05:39:14.000000000 +0200
+++ linux-2.6.13-rc7-ed/include/linux/fs.h      2005-08-25 12:39:07.000000000 
+0200
@@ -9,6 +9,8 @@
 #include <linux/config.h>
 #include <linux/limits.h>
 #include <linux/ioctl.h>
+#include <linux/cache.h>
+#include <asm/atomic.h>
 
 /*
  * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -30,10 +32,12 @@
 
 /* And dynamically-tunable limits and defaults: */
 struct files_stat_struct {
-       int nr_files;           /* read only */
+       int nr_files;           /* mirrors atomic_nr_files value */
        int nr_free_files;      /* read only */
        int max_files;          /* tunable */
-};
+
+       atomic_t atomic_nr_files;
+} ____cacheline_aligned;
 extern struct files_stat_struct files_stat;
 
 struct inodes_stat_t {
