Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-02-02 Thread David Chinner
On Fri, Feb 02, 2007 at 01:24:40PM -0600, Matt Mackall wrote:
> On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> > On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > > Yup.  Even better, use clear_highpage().
> > > 
> > > For even more goodness, clearmem_highpage_flush() does exactly
> > > the right thing for partial page zeroing ;)
> > 
> > Note that there are tons of places in buffer.c that could use
> > clearmem_highpage_flush().  See the so far untested patch below:
> > 
> 
> You probably need s/memclear/clearmem/g..

Not needed - as usual, the code is right and the comments
are wrong. ;)
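
For reference, a minimal sketch of the helper as the code actually
names it, assuming the 2.6.20-era kmap_atomic() API (paraphrased, not
a verbatim copy of include/linux/highmem.h):

	/* Zero [offset, offset+size) of a possibly-highmem page and
	 * keep the data cache coherent.
	 */
	static inline void memclear_highpage_flush(struct page *page,
						   unsigned int offset,
						   unsigned int size)
	{
		void *kaddr;

		BUG_ON(offset + size > PAGE_SIZE);

		kaddr = kmap_atomic(page, KM_USER0);
		memset((char *)kaddr + offset, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}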

Cheers,

Dave.
-- 
Dave Chinner
Principal Engineer
SGI Australian Software Group


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-02-02 Thread David Chinner
On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > Yup.  Even better, use clear_highpage().
> > 
> > For even more goodness, clearmem_highpage_flush() does exactly
> > the right thing for partial page zeroing ;)
> 
> Note that there are tons of places in buffer.c that could use
> clearmem_highpage_flush().  See the so far untested patch below:

Runs through XFSQA just fine. Looks good to me, Christoph.

Cheers,

Dave.
-- 
Dave Chinner
Principal Engineer
SGI Australian Software Group


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-02-02 Thread Matt Mackall
On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > Yup.  Even better, use clear_highpage().
> > 
> > For even more goodness, clearmem_highpage_flush() does exactly
> > the right thing for partial page zeroing ;)
> 
> Note that there are tons of places in buffer.c that could use
> clearmem_highpage_flush().  See the so far untested patch below:
> 

You probably need s/memclear/clearmem/g..
 
> Index: linux-2.6/fs/buffer.c
> ===
> > --- linux-2.6.orig/fs/buffer.c	2007-02-02 12:53:51.000000000 +0100
> > +++ linux-2.6/fs/buffer.c	2007-02-02 12:59:42.000000000 +0100
> @@ -1858,13 +1858,8 @@
>   if (block_start >= to)
>   break;
>   if (buffer_new(bh)) {
> - void *kaddr;
> -
>   clear_buffer_new(bh);
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr+block_start, 0, bh->b_size);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, block_start, bh->b_size);
>   set_buffer_uptodate(bh);
>   mark_buffer_dirty(bh);
>   }
> @@ -1952,10 +1947,8 @@
>   SetPageError(page);
>   }
>   if (!buffer_mapped(bh)) {
> - void *kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + i * blocksize, 0, blocksize);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, i * blocksize,
> + blocksize);
>   if (!err)
>   set_buffer_uptodate(bh);
>   continue;
> @@ -2098,7 +2091,6 @@
>   long status;
>   unsigned zerofrom;
>   unsigned blocksize = 1 << inode->i_blkbits;
> - void *kaddr;
>  
>   while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
>   status = -ENOMEM;
> @@ -2120,10 +2112,8 @@
>   PAGE_CACHE_SIZE, get_block);
>   if (status)
>   goto out_unmap;
> - kaddr = kmap_atomic(new_page, KM_USER0);
> - memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
> - flush_dcache_page(new_page);
> - kunmap_atomic(kaddr, KM_USER0);
> > + memclear_highpage_flush(new_page, zerofrom,
> + PAGE_CACHE_SIZE - zerofrom);
>   generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
>   unlock_page(new_page);
>   page_cache_release(new_page);
> @@ -2150,10 +2140,7 @@
>   if (status)
>   goto out1;
>   if (zerofrom < offset) {
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr+zerofrom, 0, offset-zerofrom);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, zerofrom, offset - zerofrom);
>   __block_commit_write(inode, page, zerofrom, offset);
>   }
>   return 0;
> @@ -2368,10 +2355,7 @@
>* Error recovery is pretty slack.  Clear the page and mark it dirty
>* so we'll later zero out any blocks which _were_ allocated.
>*/
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr, 0, PAGE_CACHE_SIZE);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
>   SetPageUptodate(page);
>   set_page_dirty(page);
>   return ret;
> @@ -2405,7 +2389,6 @@
>   loff_t i_size = i_size_read(inode);
>   const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
>   unsigned offset;
> - void *kaddr;
>   int ret;
>  
>   /* Is the page fully inside i_size? */
> @@ -2436,10 +2419,7 @@
>* the  page size, the remaining memory is zeroed when mapped, and
>* writes to that region are not written out to the file."
>*/
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
>  out:
>   ret = mpage_writepage(page, get_block, wbc);
>   if (ret == -EAGAIN)
> @@ -2460,7 +2440,6 @@
>   unsigned to;
>   struct page *page;
>   const struct address_space_operations *a_ops = mapping->a_ops;
> - char *kaddr;
>   int ret = 0;
>  
>   if ((offset & (blocksize - 1)) == 0)
> @@ -2474,10 +2453,7 @@
>   to = (offset + blocksize) & ~(blocksize - 1);
>   ret = a_ops->prepare_write(NULL, page, offset, to);

Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-02-02 Thread Christoph Hellwig
On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > Yup.  Even better, use clear_highpage().
> 
> For even more goodness, clearmem_highpage_flush() does exactly
> the right thing for partial page zeroing ;)

Note that there are tons of places in buffer.c that could use
clearmem_highpage_flush().  See the so far untested patch below:


Index: linux-2.6/fs/buffer.c
===================================================================
--- linux-2.6.orig/fs/buffer.c	2007-02-02 12:53:51.000000000 +0100
+++ linux-2.6/fs/buffer.c	2007-02-02 12:59:42.000000000 +0100
@@ -1858,13 +1858,8 @@
if (block_start >= to)
break;
if (buffer_new(bh)) {
-   void *kaddr;
-
clear_buffer_new(bh);
-   kaddr = kmap_atomic(page, KM_USER0);
-   memset(kaddr+block_start, 0, bh->b_size);
-   flush_dcache_page(page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(page, block_start, bh->b_size);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
@@ -1952,10 +1947,8 @@
SetPageError(page);
}
if (!buffer_mapped(bh)) {
-   void *kaddr = kmap_atomic(page, KM_USER0);
-   memset(kaddr + i * blocksize, 0, blocksize);
-   flush_dcache_page(page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(page, i * blocksize,
+   blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@@ -2098,7 +2091,6 @@
long status;
unsigned zerofrom;
unsigned blocksize = 1 << inode->i_blkbits;
-   void *kaddr;
 
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
status = -ENOMEM;
@@ -2120,10 +2112,8 @@
PAGE_CACHE_SIZE, get_block);
if (status)
goto out_unmap;
-   kaddr = kmap_atomic(new_page, KM_USER0);
-   memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-   flush_dcache_page(new_page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(new_page, zerofrom,
+   PAGE_CACHE_SIZE - zerofrom);
generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
unlock_page(new_page);
page_cache_release(new_page);
@@ -2150,10 +2140,7 @@
if (status)
goto out1;
if (zerofrom < offset) {
-   kaddr = kmap_atomic(page, KM_USER0);
-   memset(kaddr+zerofrom, 0, offset-zerofrom);
-   flush_dcache_page(page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(page, zerofrom, offset - zerofrom);
__block_commit_write(inode, page, zerofrom, offset);
}
return 0;
@@ -2368,10 +2355,7 @@
 * Error recovery is pretty slack.  Clear the page and mark it dirty
 * so we'll later zero out any blocks which _were_ allocated.
 */
-   kaddr = kmap_atomic(page, KM_USER0);
-   memset(kaddr, 0, PAGE_CACHE_SIZE);
-   flush_dcache_page(page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
set_page_dirty(page);
return ret;
@@ -2405,7 +2389,6 @@
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
-   void *kaddr;
int ret;
 
/* Is the page fully inside i_size? */
@@ -2436,10 +2419,7 @@
 * the  page size, the remaining memory is zeroed when mapped, and
 * writes to that region are not written out to the file."
 */
-   kaddr = kmap_atomic(page, KM_USER0);
-   memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-   flush_dcache_page(page);
-   kunmap_atomic(kaddr, KM_USER0);
+   memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
 out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
@@ -2460,7 +2440,6 @@
unsigned to;
struct page *page;
const struct address_space_operations *a_ops = mapping->a_ops;
-   char *kaddr;
int ret = 0;
 
if ((offset & (blocksize - 1)) == 0)
@@ -2474,10 +2453,7 @@
to = (offset + blocksize) & ~(blocksize - 1);
ret = a_ops->prepare_write(NULL, page, offset, to);
if (ret == 0) {
-  


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-30 Thread David Chinner
On Tue, Jan 30, 2007 at 05:11:32PM -0800, Andrew Morton wrote:
> On Wed, 31 Jan 2007 11:44:36 +1100
> David Chinner <[EMAIL PROTECTED]> wrote:
> 
> > On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> > > > We still don't know what is the source of kmap() activity which
> > > > necessitated this patch btw.  AFAIK the busiest source is ext2 
> > > > directories,
> > > > but perhaps NFS under certain conditions?
> > > > 
> > > > <looks at xfs_iozero>
> > > > 
> > > > ->prepare_write no longer requires that the caller kmap the page.
> > 
> > Agreed, but don't we (xfs_iozero) have to map it first to zero it?
> > 
> > I think what you are saying here, Andrew, is that we can
> > do something like:
> > 
> > page = grab_cache_page
> > ->prepare_write(page)
> > kaddr = kmap_atomic(page, KM_USER0)
> > memset(kaddr+offset, 0, bytes)
> > flush_dcache_page(page)
> > kunmap_atomic(kaddr, KM_USER0)
> > ->commit_write(page)
> > 
> > to avoid using kmap() altogether?
> 
> Yup.  Even better, use clear_highpage().

For even more goodness, clearmem_highpage_flush() does exactly
the right thing for partial page zeroing ;)

Thanks, Andrew, I've added a patch to my QA tree with this mod.

Cheers,

Dave.
-- 
Dave Chinner
Principal Engineer
SGI Australian Software Group


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-30 Thread Andrew Morton
On Wed, 31 Jan 2007 11:44:36 +1100
David Chinner <[EMAIL PROTECTED]> wrote:

> On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> > We still don't know what is the source of kmap() activity which
> > necessitated this patch btw.  AFAIK the busiest source is ext2 directories,
> > but perhaps NFS under certain conditions?
> > 
> > <looks at xfs_iozero>
> > 
> > ->prepare_write no longer requires that the caller kmap the page.
> 
> Agreed, but don't we (xfs_iozero) have to map it first to zero it?
> 
> I think what you are saying here, Andrew, is that we can
> do something like:
> 
>   page = grab_cache_page
>   ->prepare_write(page)
>   kaddr = kmap_atomic(page, KM_USER0)
>   memset(kaddr+offset, 0, bytes)
>   flush_dcache_page(page)
>   kunmap_atomic(kaddr, KM_USER0)
>   ->commit_write(page)
> 
> to avoid using kmap() altogether?
> 

Yup.  Even better, use clear_highpage().


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-30 Thread David Chinner
On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> We still don't know what is the source of kmap() activity which
> necessitated this patch btw.  AFAIK the busiest source is ext2 directories,
> but perhaps NFS under certain conditions?
> 
> <looks at xfs_iozero>
> 
> ->prepare_write no longer requires that the caller kmap the page.

Agreed, but don't we (xfs_iozero) have to map it first to zero it?

I think what you are saying here, Andrew, is that we can
do something like:

page = grab_cache_page
->prepare_write(page)
kaddr = kmap_atomic(page, KM_USER0)
memset(kaddr+offset, 0, bytes)
flush_dcache_page(page)
kunmap_atomic(kaddr, KM_USER0)
->commit_write(page)

to avoid using kmap() altogether?
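
Folding the partial-zero helper into that sequence gives roughly the
sketch below (illustrative only: mapping, index, offset and bytes are
placeholders, and error handling is elided):

	struct page *page = grab_cache_page(mapping, index);
	int status;

	/* ->prepare_write()/->commit_write() are the 2.6.20-era aops */
	status = mapping->a_ops->prepare_write(NULL, page, offset,
					       offset + bytes);
	if (status == 0) {
		/* kmap_atomic + memset + flush, all in one helper */
		memclear_highpage_flush(page, offset, bytes);
		status = mapping->a_ops->commit_write(NULL, page, offset,
						      offset + bytes);
	}
	unlock_page(page);
	page_cache_release(page);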

Cheers,

Dave.
-- 
Dave Chinner
Principal Engineer
SGI Australian Software Group


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Andrew Morton
On Mon, 29 Jan 2007 17:49:14 -0800
"Martin J. Bligh" <[EMAIL PROTECTED]> wrote:

> Andrew Morton wrote:
> > On Mon, 29 Jan 2007 17:31:20 -0800
> > "Martin J. Bligh" <[EMAIL PROTECTED]> wrote:
> > 
> >> Peter Zijlstra wrote:
> >>> On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
> >>>
>  As Christoph says, it's very much preferred that code be migrated over to
>  kmap_atomic().  Partly because kmap() is deadlockable in situations 
>  where a
>  large number of threads are trying to take two kmaps at the same time and
>  we run out.  This happened in the past, but incidences have gone away,
>  probably because of kmap->kmap_atomic conversions.
>  From which callsite have you measured problems?
> >>> CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> >>> mainline.
> >>>
> >> CONFIG_HIGHPTE is always horrid - we've known that for years.
> > 
> > We have?  What's wrong with it?  <looks around for bug reports>
> 
> http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

2% overhead for a pte-intensive workload for unknown reasons four years
ago.  Sort of a mini-horrid, no?

We still don't know what is the source of kmap() activity which
necessitated this patch btw.  AFAIK the busiest source is ext2 directories,
but perhaps NFS under certain conditions?

<looks at xfs_iozero>

->prepare_write no longer requires that the caller kmap the page.


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Nick Piggin

Ingo Molnar wrote:

For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
think 32-bit is going to live with us far longer than many thought, so 
we might as well make it work better. Both HIGHMEM and HIGHPTE are the 
default on many distro kernels, which pushes the kmap infrastructure 
quite a bit.


I don't think anybody would argue against numbers, but just that there
are not many big 32-bit SMPs anymore. And if Bill Irwin didn't fix the
kmap problem back then, it would be interesting to see a system and
workload where it actually is a bottleneck.

Not that I'm against any patch to improve scalability, if it doesn't
hurt single-threaded performance ;)

the problem is that everything that was easy to migrate was migrated off 
kmap() already - and it's exactly those hard cases that cannot be 
converted (like the pagecache use) which is the most frequent kmap() 
users.


Which pagecache use? file_read_actor()?
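
(For context: file_read_actor() tries kmap_atomic() first and only
falls back to kmap() when the atomic user copy faults - roughly this
pattern, not verbatim 2.6.20 source:)

	/* Fast path: atomic mapping, the user copy must not sleep. */
	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_to_user_inatomic(desc->arg.buf, kaddr + offset, size);
	kunmap_atomic(kaddr, KM_USER0);
	if (left == 0)
		goto success;

	/* Slow path: the copy faulted, so take a sleeping kmap()
	 * and retry with the fault-capable copy.
	 */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);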

--
SUSE Labs, Novell Inc.
Send instant messages to your online friends http://au.messenger.yahoo.com 




Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Martin J. Bligh

Andrew Morton wrote:

On Mon, 29 Jan 2007 17:31:20 -0800
"Martin J. Bligh" <[EMAIL PROTECTED]> wrote:


Peter Zijlstra wrote:

On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:


As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic().  Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out.  This happened in the past, but incidences have gone away,
probably because of kmap->kmap_atomic conversions.
From which callsite have you measured problems?

CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.


CONFIG_HIGHPTE is always horrid - we've known that for years.


We have?  What's wrong with it?  <looks around for bug reports>


http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

July 2003.


M.


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Andrew Morton
On Mon, 29 Jan 2007 17:31:20 -0800
"Martin J. Bligh" <[EMAIL PROTECTED]> wrote:

> Peter Zijlstra wrote:
> > On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
> > 
> >> As Christoph says, it's very much preferred that code be migrated over to
> >> kmap_atomic().  Partly because kmap() is deadlockable in situations where a
> >> large number of threads are trying to take two kmaps at the same time and
> >> we run out.  This happened in the past, but incidences have gone away,
> >> probably because of kmap->kmap_atomic conversions.
> > 
> >> From which callsite have you measured problems?
> > 
> > CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> > mainline.
> > 
> 
> CONFIG_HIGHPTE is always horrid - we've known that for years.

We have?  What's wrong with it?  <looks around for bug reports>

> Don't use it.
> 
> If that's all we're fixing here, I'd be highly suspect ...

highpte uses atomic kmaps - it is unrelated to this work.
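
(Roughly how i386 maps a highmem pte page, sketched from the
2.6.20-era include/asm-i386/pgtable.h; the exact macro text may
differ:)

	/* With CONFIG_HIGHPTE the pte page may live in highmem, so it
	 * is mapped with an atomic kmap, never with kmap():
	 */
	#define pte_offset_map(dir, address)				\
		((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0) +	\
		 pte_index(address))
	#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)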


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Martin J. Bligh

Peter Zijlstra wrote:

On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:


As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic().  Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out.  This happened in the past, but incidences have gone away,
probably because of kmap->kmap_atomic conversions.



From which callsite have you measured problems?


CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.



CONFIG_HIGHPTE is always horrid - we've known that for years.
Don't use it.

If that's all we're fixing here, I'd be highly suspect ...

M.



Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Ingo Molnar <[EMAIL PROTECTED]> wrote:

> Here are the numbers that i think change the picture:

i forgot to explain them:

current (estimated) total installed base of 32-bit (i686) Fedora:

>  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total

current (estimated) total installed base of 64-bit (x86_64) Fedora:

>  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

current (estimated) total installed base of PPC(64) Fedora:

>  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-ppc.total

these are updated daily i think. The counters started late October 2006, 
when FC6 was released.

Ingo


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Hugh Dickins <[EMAIL PROTECTED]> wrote:

> > For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
> > think 32-bit is going to live with us far longer than many thought, 
> > so we might as well make it work better. Both HIGHMEM and HIGHPTE are 
> > the default on many distro kernels, which pushes the kmap 
> > infrastructure quite a bit.
> 
> But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

The contention i saw was on mainline and in the pagecache uses of 
kmap(). With HIGHPTE i only meant that typically every available highmem 
option is enabled on 32-bit distro kernel rpms, to make it work on as 
wide selection of hardware as possible. Sometimes PAE is split into a 
separate rpm, but mostly there's just one 32-bit kernel.

Ingo


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Hugh Dickins
On Mon, 29 Jan 2007, Ingo Molnar wrote:
> 
> For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
> think 32-bit is going to live with us far longer than many thought, so 
> we might as well make it work better. Both HIGHMEM and HIGHPTE are the 
> default on many distro kernels, which pushes the kmap infrastructure 
> quite a bit.

But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

Hugh


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Andrew Morton <[EMAIL PROTECTED]> wrote:

> > Eradicate global locks.
> > 
> >  - kmap_lock is removed by extensive use of atomic_t, a new flush
> >    scheme and modifying set_page_address to only allow NULL<->virt
> >    transitions.

> I really don't recall any performance problems being reported out of 
> that code in recent years.

well, almost nobody profiles 32-bit boxes. I personally always knew that 
kmap() sucks on certain 32-bit SMP workloads (and -rt's scheduling model 
makes such bottlenecks even more apparent) - but many people acted in 
the belief that 64-bit is all that matters and 32-bit scalability is 
obsolete. Here are the numbers that i think change the picture:

 http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total
 http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
think 32-bit is going to live with us far longer than many thought, so 
we might as well make it work better. Both HIGHMEM and HIGHPTE are the 
default on many distro kernels, which pushes the kmap infrastructure 
quite a bit.

> As Christoph says, it's very much preferred that code be migrated over 
> to kmap_atomic().  Partly because kmap() is deadlockable in situations 
> where a large number of threads are trying to take two kmaps at the 
> same time and we run out.  This happened in the past, but incidences 
> have gone away, probably because of kmap->kmap_atomic conversions.

the problem is that everything that was easy to migrate was migrated off 
kmap() already - and it's exactly those hard cases that cannot be 
converted (like the pagecache use) which is the most frequent kmap() 
users.

While "it would be nice" to eliminate kmap(), the reality is that it's 
here and the patches from Peter to make it (quite a bit) more scalable 
are here as well.

plus, with these fixes kmap() is actually faster than kmap_atomic(). 
(because kunmap_atomic() necessitates an INVLPG instruction which is 
quite slow.)

Ingo


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Peter Zijlstra
On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:

> As Christoph says, it's very much preferred that code be migrated over to
> kmap_atomic().  Partly because kmap() is deadlockable in situations where a
> large number of threads are trying to take two kmaps at the same time and
> we run out.  This happened in the past, but incidences have gone away,
> probably because of kmap->kmap_atomic conversions.

> From which callsite have you measured problems?

CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.

> > Index: linux/include/linux/mm.h
> > ===================================================================
> > --- linux.orig/include/linux/mm.h
> > +++ linux/include/linux/mm.h
> > @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
> >  #endif
> >  
> >  #if defined(WANT_PAGE_VIRTUAL)
> > -#define page_address(page) ((page)->virtual)
> > -#define set_page_address(page, address)\
> > -   do {\
> > -   (page)->virtual = (address);\
> > -   } while(0)
> > -#define page_address_init()  do { } while(0)
> > +/*
> > + * wrap page->virtual so it is safe to set/read locklessly
> > + */
> > +#define page_address(page) \
> > +   ({ typeof((page)->virtual) v = (page)->virtual; \
> > +smp_read_barrier_depends(); \
> > +v; })
> > +
> > +static inline int set_page_address(struct page *page, void *address)
> > +{
> > +   if (address)
> > +   return cmpxchg(&page->virtual, NULL, address) == NULL;
> > +   else {
> > +   /*
> > +* cmpxchg is a bit abused because it is not guaranteed
> > +* safe wrt direct assignment on all platforms.
> > +*/
> > +   void *virt = page->virtual;
> > +   return cmpxchg(&page->virtual, virt, NULL) == virt;
> > +   }
> > +}
> 
> Have you verified that all architectures which can implement
> WANT_PAGE_VIRTUAL also implement cmpxchg?

It might have been my mistake in understanding the latest cmpxchg
thread. My understanding was that since LL/SC is not exposable as a low
level primitive, all platforms should implement a cmpxchg, where some
would not be safe against direct assignment.

Anyway, I'll do as Nick says and replace it with atomic_long_cmpxchg.

> Have you verified that sufficient headers are included for this to compile
> correctly on all WANT_PAGE_VIRTUAL-enabling architectures on all configs? 
> I don't see asm/system.h being included in mm.h and if I get yet another
> damned it-wont-compile patch I might do something irreversible.

Point taken.

> > +static int pkmap_get_free(void)
> >  {
> > -   unsigned long vaddr;
> > -   int count;
> > +   int i, pos, flush;
> > +   DECLARE_WAITQUEUE(wait, current);
> >  
> > -start:
> > -   count = LAST_PKMAP;
> > -   /* Find an empty entry */
> > -   for (;;) {
> > -   last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
> 
> The old code used masking.
> 
> > -   if (!last_pkmap_nr) {
> > -   flush_all_zero_pkmaps();
> > -   count = LAST_PKMAP;
> > -   }
> > -   if (!pkmap_count[last_pkmap_nr])
> > -   break;  /* Found a usable entry */
> > -   if (--count)
> > -   continue;
> > +restart:
> > +   for (i = 0; i < LAST_PKMAP; i++) {
> > +   pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;
> 
> The new code does more-expensive modulus.  Necessary?

I thought GCC would automagically use masking when presented with a
power-of-two constant. Can make it more explicit though.
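
Something like this sketch - and unlike %, the mask also stays in
range if the counter ever wraps negative:

	pos = atomic_inc_return(&pkmap_hand) & LAST_PKMAP_MASK;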

> > +   flush = pkmap_try_free(pos);
> > +   if (flush >= 0)
> > +   goto got_one;
> > +   }
> > +
> > +   /*
> > +* wait for somebody else to unmap their entries
> > +*/
> > +   __set_current_state(TASK_UNINTERRUPTIBLE);
> > +   add_wait_queue(&pkmap_map_wait, &wait);
> > +   schedule();
> > +   remove_wait_queue(&pkmap_map_wait, &wait);
> 
> This looks wrong.  What happens if everyone else does their unmap between
> the __set_current_state() and the add_wait_queue()?

Eek, you are quite right.
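
The usual fix, sketched below; pkmap_any_free() is a hypothetical
re-check of the wakeup condition:

	add_wait_queue(&pkmap_map_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	/* Re-check after queueing: a wakeup that arrives between the
	 * failed scan and add_wait_queue() is not lost this way.
	 */
	if (!pkmap_any_free())
		schedule();
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&pkmap_map_wait, &wait);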



Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Peter Zijlstra
On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:

 As Christoph says, it's very much preferred that code be migrated over to
 kmap_atomic().  Partly because kmap() is deadlockable in situations where a
 large number of threads are trying to take two kmaps at the same time and
 we run out.  This happened in the past, but incidences have gone away,
 probably because of kmap-kmap_atomic conversions.

 From which callsite have you measured problems?

CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.

  Index: linux/include/linux/mm.h
  ===
  --- linux.orig/include/linux/mm.h
  +++ linux/include/linux/mm.h
  @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
   #endif
   
   #if defined(WANT_PAGE_VIRTUAL)
  -#define page_address(page) ((page)-virtual)
  -#define set_page_address(page, address)\
  -   do {\
  -   (page)-virtual = (address);\
  -   } while(0)
  -#define page_address_init()  do { } while(0)
  +/*
  + * wrap page-virtual so it is safe to set/read locklessly
  + */
  +#define page_address(page) \
  +   ({ typeof((page)-virtual) v = (page)-virtual; \
  +smp_read_barrier_depends(); \
  +v; })
  +
  +static inline int set_page_address(struct page *page, void *address)
  +{
  +   if (address)
  +   return cmpxchg(page-virtual, NULL, address) == NULL;
  +   else {
  +   /*
  +* cmpxchg is a bit abused because it is not guaranteed
  +* safe wrt direct assignment on all platforms.
  +*/
  +   void *virt = page-virtual;
  +   return cmpxchg(page-vitrual, virt, NULL) == virt;
  +   }
  +}
 
 Have you verified that all architectures which can implement
 WANT_PAGE_VIRTUAL also implement cmpxchg?

It might have been my mistaken in understanding the latest cmpxchg
thread. My understanding was that since LL/SC is not exposable as a low
level primitive all platforms should implement a cmpxchg where some
would not be save against direct assignment.

Anyway, I'll do as Nick says and replace it with atomic_long_cmpxchg.

 Have you verified that sufficient headers are included for this to compile
 correctly on all WANT_PAGE_VIRTUAL-enabling architectures on all configs? 
 I don't see asm/system.h being included in mm.h and if I get yet another
 damned it-wont-compile patch I might do something irreversible.

Point taken.

  +static int pkmap_get_free(void)
   {
  -   unsigned long vaddr;
  -   int count;
  +   int i, pos, flush;
  +   DECLARE_WAITQUEUE(wait, current);
   
  -start:
  -   count = LAST_PKMAP;
  -   /* Find an empty entry */
  -   for (;;) {
  -   last_pkmap_nr = (last_pkmap_nr + 1)  LAST_PKMAP_MASK;
 
 The old code used masking.
 
  -   if (!last_pkmap_nr) {
  -   flush_all_zero_pkmaps();
  -   count = LAST_PKMAP;
  -   }
  -   if (!pkmap_count[last_pkmap_nr])
  -   break;  /* Found a usable entry */
  -   if (--count)
  -   continue;
  +restart:
  +   for (i = 0; i  LAST_PKMAP; i++) {
  +   pos = atomic_inc_return(pkmap_hand) % LAST_PKMAP;
 
 The new code does more-expensive modulus.  Necessary?

I thought GCC would automagically use masking when presented with a
power-of-two constant. Can make it more explicit though.

  +   flush = pkmap_try_free(pos);
  +   if (flush = 0)
  +   goto got_one;
  +   }
  +
  +   /*
  +* wait for somebody else to unmap their entries
  +*/
  +   __set_current_state(TASK_UNINTERRUPTIBLE);
  +   add_wait_queue(pkmap_map_wait, wait);
  +   schedule();
  +   remove_wait_queue(pkmap_map_wait, wait);
 
 This looks wrong.  What happens if everyone else does their unmap between
 the __set_current_state() and the add_wait_queue()?

Eek, you are quite right.

-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Andrew Morton [EMAIL PROTECTED] wrote:

  Eradicate global locks.
  
   - kmap_lock is removed by extensive use of atomic_t, a new flush
 scheme and modifying set_page_address to only allow NULL-virt
 transitions.

 I really don't recall any performance problems being reported out of 
 that code in recent years.

well, almost nobody profiles 32-bit boxes. I personally always knew that 
kmap() sucks on certain 32-bit SMP workloads (and -rt's scheduling model 
makes such bottlenecks even more apparent) - but many people acted in 
the belief that 64-bit is all that matters and 32-bit scalability is 
obsolete. Here are the numbers that i think changes the picture:

 http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total
 http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
think 32-bit is going to live with us far longer than many thought, so 
we might as well make it work better. Both HIGHMEM and HIGHPTE is the 
default on many distro kernels, which pushes the kmap infrastructure 
quite a bit.

 As Christoph says, it's very much preferred that code be migrated over 
 to kmap_atomic().  Partly because kmap() is deadlockable in situations 
 where a large number of threads are trying to take two kmaps at the 
 same time and we run out.  This happened in the past, but incidences 
 have gone away, probably because of kmap-kmap_atomic conversions.

the problem is that everything that was easy to migrate was migrated off 
kmap() already - and it's exactly those hard cases that cannot be 
converted (like the pagecache use) which is the most frequent kmap() 
users.

While it would be nice to eliminate kmap(), but reality is that it's 
here and the patches from Peter to make it (quite a bit) more scalable 
are here as well.

plus, with these fixes kmap() is actually faster than kmap_atomic(). 
(because kunmap_atomic() necessiates an INVLPG instruction which is 
quite slow.)

Ingo
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Hugh Dickins
On Mon, 29 Jan 2007, Ingo Molnar wrote:
 
 For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
 think 32-bit is going to live with us far longer than many thought, so 
 we might as well make it work better. Both HIGHMEM and HIGHPTE is the 
 default on many distro kernels, which pushes the kmap infrastructure 
 quite a bit.

But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

Hugh
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Hugh Dickins [EMAIL PROTECTED] wrote:

  For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
  think 32-bit is going to live with us far longer than many thought, 
  so we might as well make it work better. Both HIGHMEM and HIGHPTE is 
  the default on many distro kernels, which pushes the kmap 
  infrastructure quite a bit.
 
 But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

The contention i saw was on mainline and in the pagecache uses of 
kmap(). With HIGHPTE i only meant that typically every available highmem 
option is enabled on 32-bit distro kernel rpms, to make it work on as 
wide selection of hardware as possible. Sometimes PAE is split into a 
separate rpm, but mostly there's just one 32-bit kernel.

Ingo
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Ingo Molnar

* Ingo Molnar [EMAIL PROTECTED] wrote:

 Here are the numbers that i think changes the picture:

i forgot to explain them:

current (estimated) total installed base of 32-bit (i686) Fedora:

  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total

current (estimated) total installed base of 64-bit (x86_64) Fedora:

  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

current (estimated) total installed base of PPC(64) Fedora:

  http://www.fedoraproject.org/awstats/stats/updates-released-fc6-ppc.total

these are updated daily i think. The counters started late October 2006, 
when FC6 was released.

Ingo
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Martin J. Bligh

Peter Zijlstra wrote:

On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:


As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic().  Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out.  This happened in the past, but incidences have gone away,
probably because of kmap-kmap_atomic conversions.



From which callsite have you measured problems?


CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.



CONFIG_HIGHPTE is always horrid -we've known that for years.
Don't use it.

If that's all we're fixing here, I'd be highly suspect ...

M.

-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Andrew Morton
On Mon, 29 Jan 2007 17:31:20 -0800
Martin J. Bligh [EMAIL PROTECTED] wrote:

 Peter Zijlstra wrote:
  On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
  
  As Christoph says, it's very much preferred that code be migrated over to
  kmap_atomic().  Partly because kmap() is deadlockable in situations where a
  large number of threads are trying to take two kmaps at the same time and
  we run out.  This happened in the past, but incidences have gone away,
  probably because of kmap-kmap_atomic conversions.
  
  From which callsite have you measured problems?
  
  CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
  mainline.
  
 
 CONFIG_HIGHPTE is always horrid -we've known that for years.

We have?  What's wrong with it?  looks around for bug reports

 Don't use it.
 
 If that's all we're fixing here, I'd be highly suspect ...

highpte uses atomic kmaps - it is unrelated to this work.
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Martin J. Bligh

Andrew Morton wrote:

On Mon, 29 Jan 2007 17:31:20 -0800
Martin J. Bligh [EMAIL PROTECTED] wrote:


Peter Zijlstra wrote:

On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:


As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic().  Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out.  This happened in the past, but incidences have gone away,
probably because of kmap-kmap_atomic conversions.
From which callsite have you measured problems?

CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.


CONFIG_HIGHPTE is always horrid -we've known that for years.


We have?  What's wrong with it?  looks around for bug reports


http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

July 2003.


M.
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Nick Piggin

Ingo Molnar wrote:

For every 64-bit Fedora box there's more than seven 32-bit boxes. I 
think 32-bit is going to live with us far longer than many thought, so 
we might as well make it work better. Both HIGHMEM and HIGHPTE is the 
default on many distro kernels, which pushes the kmap infrastructure 
quite a bit.


I don't think anybody would argue against numbers, but just that there
are not many big 32-bit SMPs anymore. And if Bill Irwin didn't fix the
kmap problem back then, it would be interesting to see a system and
workload where it actually is a bottleneck.

Not that I'm against any patch to improve scalability, if it doesn't
hurt single-threaded performance ;)

the problem is that everything that was easy to migrate was migrated off 
kmap() already - and it's exactly those hard cases that cannot be 
converted (like the pagecache use) which is the most frequent kmap() 
users.


Which pagecache use? file_read_actor()?

--
SUSE Labs, Novell Inc.
Send instant messages to your online friends http://au.messenger.yahoo.com 


-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-29 Thread Andrew Morton
On Mon, 29 Jan 2007 17:49:14 -0800
Martin J. Bligh <[EMAIL PROTECTED]> wrote:

> Andrew Morton wrote:
> > On Mon, 29 Jan 2007 17:31:20 -0800
> > Martin J. Bligh <[EMAIL PROTECTED]> wrote:
> > 
> > > Peter Zijlstra wrote:
> > > > On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
> > > > > As Christoph says, it's very much preferred that code be migrated over to
> > > > > kmap_atomic().  Partly because kmap() is deadlockable in situations where a
> > > > > large number of threads are trying to take two kmaps at the same time and
> > > > > we run out.  This happened in the past, but incidences have gone away,
> > > > > probably because of kmap->kmap_atomic conversions.
> > > > > From which callsite have you measured problems?
> > > > CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> > > > mainline.
> > > CONFIG_HIGHPTE is always horrid - we've known that for years.
> > 
> > We have?  What's wrong with it?  <looks around for bug reports>
> 
> http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

2% overhead for a pte-intensive workload for unknown reasons four years
ago.  Sort of a mini-horrid, no?

We still don't know what is the source of kmap() activity which
necessitated this patch, btw.  AFAIK the busiest source is ext2
directories, but perhaps NFS under certain conditions?

<looks at xfs_iozero>

->prepare_write no longer requires that the caller kmap the page.
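
For reference, the partial-page zeroing helper being pointed at above
looks roughly like this (a sketch in the spirit of the 2.6-era
include/linux/highmem.h helper; the exact body may differ by tree):

static inline void memclear_highpage_flush(struct page *page,
					   unsigned int offset,
					   unsigned int size)
{
	void *kaddr;

	BUG_ON(offset + size > PAGE_SIZE);

	/* short-lived per-CPU mapping; nothing below may sleep */
	kaddr = kmap_atomic(page, KM_USER0);
	memset((char *)kaddr + offset, 0, size);
	/* keep virtually-indexed D-caches coherent with the zeroed bytes */
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}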


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Nick Piggin

Andrew Morton wrote:

> On Sun, 28 Jan 2007 15:11:34 +0100
> Peter Zijlstra <[EMAIL PROTECTED]> wrote:
> 
> > +static inline int set_page_address(struct page *page, void *address)
> > +{
> > +	if (address)
> > +		return cmpxchg(&page->virtual, NULL, address) == NULL;
> > +	else {
> > +		/*
> > +		 * cmpxchg is a bit abused because it is not guaranteed
> > +		 * safe wrt direct assignment on all platforms.
> > +		 */
> > +		void *virt = page->virtual;
> > +		return cmpxchg(&page->virtual, virt, NULL) == virt;
> > +	}
> > +}
> 
> Have you verified that all architectures which can implement
> WANT_PAGE_VIRTUAL also implement cmpxchg?


Simple: we should not implement cmpxchg in generic code. We should
be able to use atomic_long_cmpxchg for this -- it will work perfectly
regardless of what anybody else tells you.

cmpxchg is only required for when that memory location may get modified
by some other means than under your control (eg. userspace, in the case
of drm, or hardware MMU in the case of Christoph's old page fault
scalability patches).
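
A sketch of that suggestion, assuming (hypothetically) that
page->virtual were declared as an atomic_long_t rather than a plain
pointer:

static inline int set_page_address(struct page *page, void *address)
{
	/* atomic_long_cmpxchg() comes from the generic atomic_long_t
	 * layer, so no arch-specific cmpxchg is required */
	if (address)
		return atomic_long_cmpxchg(&page->virtual, 0,
					   (long)address) == 0;
	else {
		long virt = atomic_long_read(&page->virtual);
		return atomic_long_cmpxchg(&page->virtual, virt, 0) == virt;
	}
}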

--
SUSE Labs, Novell Inc.
Send instant messages to your online friends http://au.messenger.yahoo.com 




Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Andrew Morton
On Sun, 28 Jan 2007 15:11:34 +0100
Peter Zijlstra <[EMAIL PROTECTED]> wrote:

> Eradicate global locks.
> 
>  - kmap_lock is removed by extensive use of atomic_t, a new flush
>scheme and modifying set_page_address to only allow NULL<->virt
>transitions.
> 
> A count of 0 is an exclusive state acting as an entry lock. This is done
> using inc_not_zero and cmpxchg. The restriction on changing the virtual
> address closes the gap with concurrent additions of the same entry.
> 
>  - pool_lock is removed by using the pkmap index for the
>page_address_maps.
> 
> By using the pkmap index for the hash entries it is no longer needed to
> keep a free list.
> 
> This patch has been in -rt for a while but should also help regular
> highmem machines with multiple cores/cpus.

I really don't recall any performance problems being reported out of that
code in recent years.

As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic().  Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out.  This happened in the past, but incidences have gone away,
probably because of kmap->kmap_atomic conversions.

From which callsite have you measured problems?

> Index: linux/include/linux/mm.h
> ===
> --- linux.orig/include/linux/mm.h
> +++ linux/include/linux/mm.h
> @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
>  #endif
>  
>  #if defined(WANT_PAGE_VIRTUAL)
> -#define page_address(page) ((page)->virtual)
> -#define set_page_address(page, address)  \
> - do {\
> - (page)->virtual = (address);\
> - } while(0)
> -#define page_address_init()  do { } while(0)
> +/*
> + * wrap page->virtual so it is safe to set/read locklessly
> + */
> +#define page_address(page) \
> + ({ typeof((page)->virtual) v = (page)->virtual; \
> +  smp_read_barrier_depends(); \
> +  v; })
> +
> +static inline int set_page_address(struct page *page, void *address)
> +{
> + if (address)
> + return cmpxchg(&page->virtual, NULL, address) == NULL;
> + else {
> + /*
> +  * cmpxchg is a bit abused because it is not guaranteed
> +  * safe wrt direct assignment on all platforms.
> +  */
> + void *virt = page->virtual;
> + return cmpxchg(&page->virtual, virt, NULL) == virt;
> + }
> +}

Have you verified that all architectures which can implement
WANT_PAGE_VIRTUAL also implement cmpxchg?

Have you verified that sufficient headers are included for this to compile
correctly on all WANT_PAGE_VIRTUAL-enabling architectures on all configs? 
I don't see asm/system.h being included in mm.h and if I get yet another
damned it-wont-compile patch I might do something irreversible.

> +static int pkmap_get_free(void)
>  {
> - unsigned long vaddr;
> - int count;
> + int i, pos, flush;
> + DECLARE_WAITQUEUE(wait, current);
>  
> -start:
> - count = LAST_PKMAP;
> - /* Find an empty entry */
> - for (;;) {
> - last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;

The old code used masking.

> - if (!last_pkmap_nr) {
> - flush_all_zero_pkmaps();
> - count = LAST_PKMAP;
> - }
> - if (!pkmap_count[last_pkmap_nr])
> - break;  /* Found a usable entry */
> - if (--count)
> - continue;
> +restart:
> + for (i = 0; i < LAST_PKMAP; i++) {
> + pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;

The new code does more-expensive modulus.  Necessary?
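
If LAST_PKMAP stays a power of two, the cheaper mask form would
presumably still work here, and it also avoids a negative result once
the atomic counter wraps (hypothetical, not from the posted patch):

	pos = atomic_inc_return(&pkmap_hand) & LAST_PKMAP_MASK;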

> + flush = pkmap_try_free(pos);
> + if (flush >= 0)
> + goto got_one;
> + }
> +
> + /*
> +  * wait for somebody else to unmap their entries
> +  */
> + __set_current_state(TASK_UNINTERRUPTIBLE);
> + add_wait_queue(&pkmap_map_wait, &wait);
> + schedule();
> + remove_wait_queue(&pkmap_map_wait, &wait);

This looks wrong.  What happens if everyone else does their unmap between
the __set_current_state() and the add_wait_queue()?
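
The conventional fix for that lost-wakeup window is to get on the wait
queue before the final check, so a wakeup issued after a failed scan
cannot be missed. A sketch against the names used in the patch (flush
handling elided, so this is illustrative rather than a drop-in
replacement):

static int pkmap_get_free(void)
{
	int i, pos, flush;
	DECLARE_WAITQUEUE(wait, current);

restart:
	for (i = 0; i < LAST_PKMAP; i++) {
		pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;
		flush = pkmap_try_free(pos);
		if (flush >= 0)
			goto got_one;
	}

	/* queue first, so any unmap from here on will wake us ... */
	add_wait_queue(&pkmap_map_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);

	/* ... then re-check before committing to the sleep */
	for (i = 0; i < LAST_PKMAP; i++) {
		flush = pkmap_try_free(i);
		if (flush >= 0) {
			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&pkmap_map_wait, &wait);
			pos = i;
			goto got_one;
		}
	}

	schedule();
	remove_wait_queue(&pkmap_map_wait, &wait);
	goto restart;

got_one:
	return pos;
}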




Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Ingo Molnar

* Christoph Hellwig <[EMAIL PROTECTED]> wrote:

> On Sun, Jan 28, 2007 at 04:48:06PM +0100, Ingo Molnar wrote:
> > i'm sorry, but do you realize that files_lock is a global lock, 
> > triggered by /every single/ file close?
> 
> Please check which thread you're in before you start such lengthy 
> rants.

my reply applies to the other thread too; you made a similar comment 
there:

* Christoph Hellwig <[EMAIL PROTECTED]> wrote:

> On Sun, Jan 28, 2007 at 12:51:18PM +0100, Peter Zijlstra wrote:
> > This patch-set breaks up the global file_list_lock which was found 
> > to be a severe contention point under basically any filesystem 
> > intensive workload.
>
> Benchmarks, please.  Where exactly do you see contention for this?

Ingo


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Christoph Hellwig
On Sun, Jan 28, 2007 at 04:48:06PM +0100, Ingo Molnar wrote:
> i'm sorry, but do you realize that files_lock is a global lock, 
> triggered by /every single/ file close?

Please check which thread you're in before you start such lengthy rants.



Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Ingo Molnar

* Christoph Hellwig <[EMAIL PROTECTED]> wrote:

> On Sun, Jan 28, 2007 at 04:17:00PM +0100, Ingo Molnar wrote:
> > scalability. I did lock profiling on the -rt kernel, which exposes 
> > such things nicely. Half of the lock contention events during kernel 
> > compile were due to kmap(). (The system had 2 GB of RAM, so 40% 
> > lowmem, 60% highmem.)
> 
> Numbers please, and not on -rt but on mainline.  Please show the 
> profiles.

i'm sorry, but do you realize that files_lock is a global lock, 
triggered by /every single/ file close?

   " files_lock is a global lock and we touch it for every single
 sys_close() system call that the system does. "

You really don't need to be a rocket scientist to see that it's a 
globally bouncing cacheline that has a major effect on certain 
VFS-intense workloads. Peter has worked hard to eliminate its effects 
without having to couple this to an intrusive rewrite of the TTY layer.

( really, i personally find your dismissive style appalling and i think 
  such a reception of a nice patchset must be humiliating to Peter. I
  certainly try to avoid to be involved with any VFS internals, due to
  this unwelcoming tone of discussion. Had you been around when i
  started contributing to the Linux kernel i'd probably not be hacking
  the kernel today. You are a good hacker but the simultaneous
  collateral damage you are causing is significantly reducing the net
  benefit. )

> > ps. please fix your mailer to not emit Mail-Followup-To headers. In 
> > Mutt you can do this via "set followup_to=no" in your .muttrc.
> 
> I have told you last time that this is absolutely intentional and I 
> won't change it.

( You are messing up the reply headers, everyone is listed in the 'To:'
  field for any reply to your mail, instead of being added to the Cc:
  list. )

Ingo


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Christoph Hellwig
On Sun, Jan 28, 2007 at 04:17:00PM +0100, Ingo Molnar wrote:
> scalability. I did lock profiling on the -rt kernel, which exposes such 
> things nicely. Half of the lock contention events during kernel compile 
> were due to kmap(). (The system had 2 GB of RAM, so 40% lowmem, 60% 
> highmem.)

Numbers please, and not on -rt but on mainline.  Please show the profiles.

> ps. please fix your mailer to not emit Mail-Followup-To headers. In Mutt
> you can do this via "set followup_to=no" in your .muttrc.

I have told you last time that this is absolutely intentional and I won't
change it.


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Ingo Molnar

* Christoph Hellwig <[EMAIL PROTECTED]> wrote:

> On Sun, Jan 28, 2007 at 03:11:34PM +0100, Peter Zijlstra wrote:
> > Eradicate global locks.
> > 
> >  - kmap_lock is removed by extensive use of atomic_t, a new flush
> >scheme and modifying set_page_address to only allow NULL<->virt
> >transitions.
> 
> What's the point for this? [...]

scalability. I did lock profiling on the -rt kernel, which exposes such 
things nicely. Half of the lock contention events during kernel compile 
were due to kmap(). (The system had 2 GB of RAM, so 40% lowmem, 60% 
highmem.)

> [...] In doubt we just need to convert that caller to kmap_atomic.

the pagecache ones cannot be converted to kmap_atomic, because we can 
block while holding them. Plus kmap_atomic is quite a bit slower than 
this scalable version of kmap().
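
To illustrate the constraint with a hypothetical helper (not code from
the patch): anything that can fault, and therefore block, while the
page is mapped has to use the sleeping kmap()/kunmap() pair, since a
kmap_atomic() section must not schedule:

static unsigned long fill_pagecache_page(struct page *page,
					 const char __user *buf,
					 unsigned long count)
{
	char *kaddr = kmap(page);	/* may sleep; mapping persists */
	/* may fault on the user buffer and block on I/O */
	unsigned long left = __copy_from_user(kaddr, buf, count);
	kunmap(page);
	return left;			/* bytes left uncopied */
}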

Ingo

ps. please fix your mailer to not emit Mail-Followup-To headers. In Mutt
you can do this via "set followup_to=no" in your .muttrc.


Re: [PATCH] mm: remove global locks from mm/highmem.c

2007-01-28 Thread Christoph Hellwig
On Sun, Jan 28, 2007 at 03:11:34PM +0100, Peter Zijlstra wrote:
> Eradicate global locks.
> 
>  - kmap_lock is removed by extensive use of atomic_t, a new flush
>scheme and modifying set_page_address to only allow NULL<->virt
>transitions.

What's the point for this?  Extensive atomic_t use is usually much worse
than spinlocks.  A spinlock region is just a single atomic instruction,
as soon as you do more than one atomic_t you tend to make scalability
worse.  Not to mention that atomic_t are much worse when you try to
profile scalability issues.

What benchmark shows a problem with the current locking, and from what
caller?  In doubt we just need to convert that caller to kmap_atomic.
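
The kind of conversion meant here, shown as a hypothetical hunk (valid
only when nothing between the map and the unmap can sleep):

-	kaddr = kmap(page);
-	memset(kaddr + offset, 0, len);
-	flush_dcache_page(page);
-	kunmap(page);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, len);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);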


> 
> A count of 0 is an exclusive state acting as an entry lock. This is done
> using inc_not_zero and cmpxchg. The restriction on changing the virtual
> address closes the gap with concurrent additions of the same entry.
> 
>  - pool_lock is removed by using the pkmap index for the
>page_address_maps.
> 
> By using the pkmap index for the hash entries it is no longer needed to
> keep a free list.
> 
> This patch has been in -rt for a while but should also help regular
> highmem machines with multiple cores/cpus.
> 
> Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
> ---
>  include/linux/mm.h |   32 ++-
>  mm/highmem.c   |  433 ++---
>  2 files changed, 276 insertions(+), 189 deletions(-)
> 
> Index: linux/include/linux/mm.h
> ===
> --- linux.orig/include/linux/mm.h
> +++ linux/include/linux/mm.h
> @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
>  #endif
>  
>  #if defined(WANT_PAGE_VIRTUAL)
> -#define page_address(page) ((page)->virtual)
> -#define set_page_address(page, address)  \
> - do {\
> - (page)->virtual = (address);\
> - } while(0)
> -#define page_address_init()  do { } while(0)
> +/*
> + * wrap page->virtual so it is safe to set/read locklessly
> + */
> +#define page_address(page) \
> + ({ typeof((page)->virtual) v = (page)->virtual; \
> +  smp_read_barrier_depends(); \
> +  v; })
> +
> +static inline int set_page_address(struct page *page, void *address)
> +{
> + if (address)
> + return cmpxchg(&page->virtual, NULL, address) == NULL;
> + else {
> + /*
> +  * cmpxchg is a bit abused because it is not guaranteed
> +  * safe wrt direct assignment on all platforms.
> +  */
> + void *virt = page->virtual;
> + return cmpxchg(&page->virtual, virt, NULL) == virt;
> + }
> +}
> +void page_address_init(void);
>  #endif
>  
>  #if defined(HASHED_PAGE_VIRTUAL)
>  void *page_address(struct page *page);
> -void set_page_address(struct page *page, void *virtual);
> +int set_page_address(struct page *page, void *virtual);
>  void page_address_init(void);
>  #endif
>  
>  #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
>  #define page_address(page) lowmem_page_address(page)
> -#define set_page_address(page, address)  do { } while(0)
> +#define set_page_address(page, address)  (0)
>  #define page_address_init()  do { } while(0)
>  #endif
>  
> Index: linux/mm/highmem.c
> ===
> --- linux.orig/mm/highmem.c
> +++ linux/mm/highmem.c
> @@ -14,6 +14,11 @@
>   * based on Linus' idea.
>   *
>   * Copyright (C) 1999 Ingo Molnar <[EMAIL PROTECTED]>
> + *
> + * Largely rewritten to get rid of all global locks
> + *
> + * Copyright (C) 2006 Red Hat, Inc., Peter Zijlstra <[EMAIL PROTECTED]>
> + *
>   */
>  
>  #include <linux/mm.h>
> @@ -27,18 +32,14 @@
>  #include <linux/hash.h>
>  #include <linux/highmem.h>
>  #include <linux/blktrace_api.h>
> +
>  #include <asm/tlbflush.h>
> +#include <asm/pgtable.h>
>  
> -/*
> - * Virtual_count is not a pure "count".
> - *  0 means that it is not mapped, and has not been mapped
> - *since a TLB flush - it is usable.
> - *  1 means that there are no users, but it has been mapped
> - *since the last TLB flush - so we can't use it.
> - *  n means that there are (n-1) current users of it.
> - */
>  #ifdef CONFIG_HIGHMEM
>  
> +static int __set_page_address(struct page *page, void *virtual, int pos);
> +
>  unsigned long totalhigh_pages __read_mostly;
>  
>  unsigned int nr_free_highpages (void)
> @@ -52,164 +53,208 @@ unsigned int nr_free_highpages (void)
>   return pages;
>  }
>  
> -static int pkmap_count[LAST_PKMAP];
> -static unsigned int last_pkmap_nr;
> -static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
> +/*
> + * count is not a pure "count".
> + *  0 means it's owned exclusively by someone
> + *  1 means it's free for use - either mapped or not.
> + *  n means that there are (n-1) current users of it.
> + */
> +static atomic_t pkmap_count[LAST_PKMAP];
> +static atomic_t pkmap_hand;
>  
>  pte_t * pkmap_page_table;
>  
>  static 
