Re: [PATCH V2 1/2] powerpc/mm/slice: Move slice_mask struct definition to slice.c

2017-02-13 Thread Aneesh Kumar K.V



On Tuesday 14 February 2017 11:55 AM, Michael Ellerman wrote:
> "Aneesh Kumar K.V"  writes:
>
>> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
>> index b3f45e413a60..08ac27eae408 100644
>> --- a/arch/powerpc/mm/slice.c
>> +++ b/arch/powerpc/mm/slice.c
>> @@ -37,7 +37,16 @@
>>  #include 
>>
>>  static DEFINE_SPINLOCK(slice_convert_lock);
>> -
>> +/*
>> + * One bit per slice. We have lower slices which cover 256MB segments
>> + * upto 4G range. That gets us 16 low slices. For the rest we track slices
>> + * in 1TB size.
>
> Can we tighten this comment up a bit.
>
> What about:
>
>> + * One bit per slice. The low slices cover the range 0 - 4GB, each
>>   * slice being 256MB in size, for 16 low slices. The high slices
>>   * cover the rest of the address space at 1TB granularity, with the
>>   * exception of high slice 0 which covers the range 4GB - 1TB.
>
> OK?

good.

>> + * 64 below is actually SLICE_NUM_HIGH to fixup complie errros
>
> That line is bogus AFAICS, it refers to the old hardcoded value (prior
> to 512), I'll drop it.

Thanks

-aneesh




Re: [PATCH V2 1/2] powerpc/mm/slice: Move slice_mask struct definition to slice.c

2017-02-13 Thread Michael Ellerman
"Aneesh Kumar K.V"  writes:

> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index b3f45e413a60..08ac27eae408 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -37,7 +37,16 @@
>  #include 
>  
>  static DEFINE_SPINLOCK(slice_convert_lock);
> -
> +/*
> + * One bit per slice. We have lower slices which cover 256MB segments
> + * upto 4G range. That gets us 16 low slices. For the rest we track slices
> + * in 1TB size.

Can we tighten this comment up a bit.

What about:

> + * One bit per slice. The low slices cover the range 0 - 4GB, each
>   * slice being 256MB in size, for 16 low slices. The high slices
>   * cover the rest of the address space at 1TB granularity, with the
>   * exception of high slice 0 which covers the range 4GB - 1TB.

OK?
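
To make the proposed layout concrete, here is a small standalone sketch (not
the kernel code): the 256MB/1TB shift values follow the sizes in the comment,
the helper names and example addresses are only illustrative, and the
high-slice computation mirrors the GET_HIGH_SLICE_INDEX macro visible in
page_64.h in the patch below.

#include <stdio.h>

#define SLICE_LOW_SHIFT   28    /* 256MB low slices */
#define SLICE_HIGH_SHIFT  40    /* 1TB high slices  */

/* Low slices: 0 - 4GB, i.e. 16 slices of 256MB each. */
static unsigned int low_slice_index(unsigned long long addr)
{
        return addr >> SLICE_LOW_SHIFT;
}

/* High slices: 1TB each; high slice 0 covers 4GB - 1TB. */
static unsigned int high_slice_index(unsigned long long addr)
{
        return addr >> SLICE_HIGH_SHIFT;
}

int main(void)
{
        unsigned long long a = 0x2fffffffULL;    /* just under 768MB -> low slice 2  */
        unsigned long long b = 0x1a000000000ULL; /* ~1.6TB           -> high slice 1 */

        printf("low slice index  of %#llx: %u\n", a, low_slice_index(a));
        printf("high slice index of %#llx: %u\n", b, high_slice_index(b));
        return 0;
}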

> + * 64 below is actually SLICE_NUM_HIGH to fixup complie errros

That line is bogus AFAICS, it refers to the old hardcoded value (prior
to 512), I'll drop it.

> + */
> +struct slice_mask {
> + u64 low_slices;
> + DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
> +};
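
For reference, a minimal userspace sketch (not kernel code) of what this
layout amounts to, with DECLARE_BITMAP expanded by hand, SLICE_NUM_HIGH
assumed to be 512 as in the old header, and a 64-bit unsigned long assumed:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SLICE_NUM_HIGH  512
#define BITS_PER_LONG   64

struct slice_mask {
        uint64_t low_slices;       /* one bit per 256MB low slice */
        unsigned long high_slices[SLICE_NUM_HIGH / BITS_PER_LONG]; /* one bit per 1TB high slice */
};

/* Mark high slice i in the bitmap, much as set_bit() would in the kernel. */
static void mark_high_slice(struct slice_mask *m, unsigned int i)
{
        m->high_slices[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

int main(void)
{
        struct slice_mask m;

        memset(&m, 0, sizeof(m));
        m.low_slices |= 1ULL << 3;   /* low slice 3: 768MB - 1GB         */
        mark_high_slice(&m, 0);      /* high slice 0: 4GB - 1TB          */
        mark_high_slice(&m, 65);     /* lands in the second bitmap word  */

        printf("low_slices        = %#llx\n", (unsigned long long)m.low_slices);
        printf("high_slices[0..1] = %#lx %#lx\n", m.high_slices[0], m.high_slices[1]);
        return 0;
}

In the kernel proper the bitmap would of course be manipulated with the
set_bit()/bitmap_* helpers rather than open-coded shifts.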


cheers


[PATCH V2 1/2] powerpc/mm/slice: Move slice_mask struct definition to slice.c

2017-02-13 Thread Aneesh Kumar K.V
This structure definition need not be in a header since it is used only by
slice.c, so move it there. This also allows us to use SLICE_NUM_HIGH instead
of the hardcoded 512 and helps in getting rid of one BUILD_BUG_ON().

I also switch the low_slices type from u16 to u64. This doesn't change the
size of the struct, because the u16 was already followed by padding, and it
lets us use the bitmap printing functions to print the slice mask.

Signed-off-by: Aneesh Kumar K.V 
---
 arch/powerpc/include/asm/page_64.h | 11 ---
 arch/powerpc/mm/slice.c            | 13 ++---
 2 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 9b60e9455c6e..3ecfc2734b50 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -99,17 +99,6 @@ extern u64 ppc64_pft_size;
 #define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
 
 #ifndef __ASSEMBLY__
-/*
- * One bit per slice. We have lower slices which cover 256MB segments
- * upto 4G range. That gets us 16 low slices. For the rest we track slices
- * in 1TB size.
- * 64 below is actually SLICE_NUM_HIGH to fixup complie errros
- */
-struct slice_mask {
-   u16 low_slices;
-   DECLARE_BITMAP(high_slices, 512);
-};
-
 struct mm_struct;
 
 extern unsigned long slice_get_unmapped_area(unsigned long addr,
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index b3f45e413a60..08ac27eae408 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -37,7 +37,16 @@
 #include 
 
 static DEFINE_SPINLOCK(slice_convert_lock);
-
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * upto 4G range. That gets us 16 low slices. For the rest we track slices
+ * in 1TB size.
+ * 64 below is actually SLICE_NUM_HIGH to fixup complie errros
+ */
+struct slice_mask {
+   u64 low_slices;
+   DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
 
 #ifdef DEBUG
 int _slice_debug = 1;
@@ -407,8 +416,6 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
struct mm_struct *mm = current->mm;
unsigned long newaddr;
 
-   /* Make sure high_slices bitmap size is same as we expected */
-   BUILD_BUG_ON(512 != SLICE_NUM_HIGH);
/*
 * init different masks
 */
-- 
2.7.4
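
The commit message above notes that switching low_slices from u16 to u64 does
not change the size of the struct. A quick standalone check of that claim
(assuming a 64-bit LP64 ABI and SLICE_NUM_HIGH = 512, with DECLARE_BITMAP
expanded by hand; not kernel code):

#include <stdio.h>
#include <stdint.h>

#define SLICE_NUM_HIGH  512
#define BITS_PER_LONG   64

struct slice_mask_u16 {                 /* the old page_64.h layout     */
        uint16_t low_slices;
        unsigned long high_slices[SLICE_NUM_HIGH / BITS_PER_LONG];
};

struct slice_mask_u64 {                 /* the layout after this patch  */
        uint64_t low_slices;
        unsigned long high_slices[SLICE_NUM_HIGH / BITS_PER_LONG];
};

int main(void)
{
        /*
         * On a typical LP64 ABI both print 72: the unsigned long array
         * requires 8-byte alignment, so the u16 was already followed by
         * 6 bytes of padding, and widening it to u64 costs nothing.
         */
        printf("u16 low_slices: %zu bytes\n", sizeof(struct slice_mask_u16));
        printf("u64 low_slices: %zu bytes\n", sizeof(struct slice_mask_u64));
        return 0;
}

The other motivation mentioned, printing the mask, presumably refers to the
kernel's %*pb/%*pbl printk bitmap formats, which take the number of bits and
a pointer to the unsigned long array.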