Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27 2013, Greg Thelen wrote: > this_cpu_sub() is implemented as negation and addition. > > This patch casts the adjustment to the counter type before negation to > sign extend the adjustment. This helps in cases where the counter > type is wider than an unsigned adjustment. An alternative to this > patch is to declare such operations unsupported, but it seemed useful > to avoid surprises. > > This patch specifically helps the following example: > unsigned int delta = 1 > preempt_disable() > this_cpu_write(long_counter, 0) > this_cpu_sub(long_counter, delta) > preempt_enable() > > Before this change long_counter on a 64 bit machine ends with value > 0x, rather than 0x. This is because > this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), > which is basically: > long_counter = 0 + 0x > > Also apply the same cast to: > __this_cpu_sub() > this_cpu_sub_return() > and __this_cpu_sub_return() > > All percpu_test.ko passes, especially the following cases which > previously failed: > > l -= ui_one; > __this_cpu_sub(long_counter, ui_one); > CHECK(l, long_counter, -1); > > l -= ui_one; > this_cpu_sub(long_counter, ui_one); > CHECK(l, long_counter, -1); > CHECK(l, long_counter, 0x); > > ul -= ui_one; > __this_cpu_sub(ulong_counter, ui_one); > CHECK(ul, ulong_counter, -1); > CHECK(ul, ulong_counter, 0x); > > ul = this_cpu_sub_return(ulong_counter, ui_one); > CHECK(ul, ulong_counter, 2); > > ul = __this_cpu_sub_return(ulong_counter, ui_one); > CHECK(ul, ulong_counter, 1); > > Signed-off-by: Greg Thelen > --- > arch/x86/include/asm/percpu.h | 3 ++- > include/linux/percpu.h| 8 > lib/percpu_test.c | 2 +- > 3 files changed, 7 insertions(+), 6 deletions(-) > > diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h > index 0da5200..b3e18f8 100644 > --- a/arch/x86/include/asm/percpu.h > +++ b/arch/x86/include/asm/percpu.h > @@ -128,7 +128,8 @@ do { > \ > do { \ > typedef typeof(var) pao_T__;\ > const int pao_ID__ = (__builtin_constant_p(val) 
&& \ > - ((val) == 1 || (val) == -1)) ? (val) : 0; \ > + ((val) == 1 || (val) == -1)) ?\ > + (int)(val) : 0; \ > if (0) {\ > pao_T__ pao_tmp__; \ > pao_tmp__ = (val); \ > diff --git a/include/linux/percpu.h b/include/linux/percpu.h > index cc88172..c74088a 100644 > --- a/include/linux/percpu.h > +++ b/include/linux/percpu.h > @@ -332,7 +332,7 @@ do { > \ > #endif > > #ifndef this_cpu_sub > -# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) > +# define this_cpu_sub(pcp, val) this_cpu_add((pcp), > -(typeof(pcp))(val)) > #endif > > #ifndef this_cpu_inc > @@ -418,7 +418,7 @@ do { > \ > # define this_cpu_add_return(pcp, val) > __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) > #endif > > -#define this_cpu_sub_return(pcp, val)this_cpu_add_return(pcp, -(val)) > +#define this_cpu_sub_return(pcp, val)this_cpu_add_return(pcp, > -(typeof(pcp))(val)) > #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) > #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) > > @@ -586,7 +586,7 @@ do { > \ > #endif > > #ifndef __this_cpu_sub > -# define __this_cpu_sub(pcp, val)__this_cpu_add((pcp), -(val)) > +# define __this_cpu_sub(pcp, val)__this_cpu_add((pcp), > -(typeof(pcp))(val)) > #endif > > #ifndef __this_cpu_inc > @@ -668,7 +668,7 @@ do { > \ > __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) > #endif > > -#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, > -(val)) > +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, > -(typeof(pcp))(val)) > #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) > #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) > > diff --git a/lib/percpu_test.c b/lib/percpu_test.c > index 1ebeb44..8ab4231 100644 > --- a/lib/percpu_test.c > +++ b/lib/percpu_test.c > @@ -118,7 +118,7 @@ static int __init percpu_test_init(void) > CHECK(ul, ulong_counter, 2); > > ul = __this_cpu_sub_return(ulong_counter, ui_one); > - CHECK(ul,
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27 2013, Tejun Heo wrote: > On Sun, Oct 27, 2013 at 05:04:29AM -0700, Andrew Morton wrote: >> On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo wrote: >> >> > We probably want to cc stable for this and the next one. How should >> > these be routed? I can take these through percpu tree or mm works >> > too. Either way, it'd be best to route them together. >> >> Yes, all three look like -stable material to me. I'll grab them later >> in the week if you haven't ;) > > Tried to apply to percpu but the third one is a fix for a patch which > was added to -mm during v3.12-rc1, so these are yours. :) I don't object to stable for the first two non-memcg patches, but it's probably unnecessary. I should have made it more clear, but an audit of v3.12-rc6 shows that only new memcg code is affected - the new mem_cgroup_move_account_page_stat() is the only place where an unsigned adjustment is used. All other callers (e.g. shrink_dcache_sb) already use a signed adjustment, so no problems before v3.12. Though I did not audit the stable kernel trees, so there could be something hiding in there. >> The names of the first two patches distress me. They rather clearly >> assert that the code affects percpu_counter.[ch], but that is not the case. >> Massaging is needed to fix that up. > > Yeah, something like the following would be better > > percpu: add test module for various percpu operations > percpu: fix this_cpu_sub() subtrahend casting for unsigneds > memcg: use __this_cpu_sub() to dec stats to avoid incorrect subtrahend > casting No objection to renaming. Let me know if you want these reposted with updated titles. -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27, 2013 at 05:04:29AM -0700, Andrew Morton wrote: > On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo wrote: > > > We probably want to cc stable for this and the next one. How should > > these be routed? I can take these through percpu tree or mm works > > too. Either way, it'd be best to route them together. > > Yes, all three look like -stable material to me. I'll grab them later > in the week if you haven't ;) Tried to apply to percpu but the third one is a fix for a patch which was added to -mm during v3.12-rc1, so these are yours. :) > The names of the first two patches distress me. They rather clearly > assert that the code affects percpu_counter.[ch], but that is not the case. > Massaging is needed to fix that up. Yeah, something like the following would be better percpu: add test module for various percpu operations percpu: fix this_cpu_sub() subtrahend casting for unsigneds memcg: use __this_cpu_sub() to dec stats to avoid incorrect subtrahend casting Thanks. -- tejun -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo wrote: > We probably want to cc stable for this and the next one. How should > these be routed? I can take these through percpu tree or mm works > too. Either way, it'd be best to route them together. Yes, all three look like -stable material to me. I'll grab them later in the week if you haven't ;) The names of the first two patches distress me. They rather clearly assert that the code affects percpu_counter.[ch], but that is not the case. Massaging is needed to fix that up. -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27, 2013 at 12:44:35AM -0700, Greg Thelen wrote: > this_cpu_sub() is implemented as negation and addition. > > This patch casts the adjustment to the counter type before negation to > sign extend the adjustment. This helps in cases where the counter > type is wider than an unsigned adjustment. An alternative to this > patch is to declare such operations unsupported, but it seemed useful > to avoid surprises. > > This patch specifically helps the following example: > unsigned int delta = 1 > preempt_disable() > this_cpu_write(long_counter, 0) > this_cpu_sub(long_counter, delta) > preempt_enable() > > Before this change long_counter on a 64 bit machine ends with value > 0x, rather than 0x. This is because > this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), > which is basically: > long_counter = 0 + 0x > > Also apply the same cast to: > __this_cpu_sub() > this_cpu_sub_return() > and __this_cpu_sub_return() > > All percpu_test.ko passes, especially the following cases which > previously failed: > > l -= ui_one; > __this_cpu_sub(long_counter, ui_one); > CHECK(l, long_counter, -1); > > l -= ui_one; > this_cpu_sub(long_counter, ui_one); > CHECK(l, long_counter, -1); > CHECK(l, long_counter, 0x); > > ul -= ui_one; > __this_cpu_sub(ulong_counter, ui_one); > CHECK(ul, ulong_counter, -1); > CHECK(ul, ulong_counter, 0x); > > ul = this_cpu_sub_return(ulong_counter, ui_one); > CHECK(ul, ulong_counter, 2); > > ul = __this_cpu_sub_return(ulong_counter, ui_one); > CHECK(ul, ulong_counter, 1); > > Signed-off-by: Greg Thelen Ouch, nice catch. Acked-by: Tejun Heo We probably want to cc stable for this and the next one. How should these be routed? I can take these through percpu tree or mm works too. Either way, it'd be best to route them together. Thanks. 
-- tejun -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
[PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
this_cpu_sub() is implemented as negation and addition. This patch casts the adjustment to the counter type before negation to sign extend the adjustment. This helps in cases where the counter type is wider than an unsigned adjustment. An alternative to this patch is to declare such operations unsupported, but it seemed useful to avoid surprises. This patch specifically helps the following example: unsigned int delta = 1 preempt_disable() this_cpu_write(long_counter, 0) this_cpu_sub(long_counter, delta) preempt_enable() Before this change long_counter on a 64 bit machine ends with value 0x, rather than 0x. This is because this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), which is basically: long_counter = 0 + 0x Also apply the same cast to: __this_cpu_sub() this_cpu_sub_return() and __this_cpu_sub_return() All percpu_test.ko passes, especially the following cases which previously failed: l -= ui_one; __this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); l -= ui_one; this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); CHECK(l, long_counter, 0x); ul -= ui_one; __this_cpu_sub(ulong_counter, ui_one); CHECK(ul, ulong_counter, -1); CHECK(ul, ulong_counter, 0x); ul = this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 1); Signed-off-by: Greg Thelen --- arch/x86/include/asm/percpu.h | 3 ++- include/linux/percpu.h| 8 lib/percpu_test.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0da5200..b3e18f8 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -128,7 +128,8 @@ do { \ do { \ typedef typeof(var) pao_T__;\ const int pao_ID__ = (__builtin_constant_p(val) && \ - ((val) == 1 || (val) == -1)) ? 
(val) : 0; \ + ((val) == 1 || (val) == -1)) ?\ + (int)(val) : 0; \ if (0) {\ pao_T__ pao_tmp__; \ pao_tmp__ = (val); \ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index cc88172..c74088a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -332,7 +332,7 @@ do { \ #endif #ifndef this_cpu_sub -# define this_cpu_sub(pcp, val)this_cpu_add((pcp), -(val)) +# define this_cpu_sub(pcp, val)this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef this_cpu_inc @@ -418,7 +418,7 @@ do { \ # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) #endif -#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) @@ -586,7 +586,7 @@ do { \ #endif #ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) +# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef __this_cpu_inc @@ -668,7 +668,7 @@ do { \ __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) #endif -#define __this_cpu_sub_return(pcp, val)__this_cpu_add_return(pcp, -(val)) +#define __this_cpu_sub_return(pcp, val)__this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) diff --git a/lib/percpu_test.c b/lib/percpu_test.c index 1ebeb44..8ab4231 100644 --- a/lib/percpu_test.c +++ b/lib/percpu_test.c @@ -118,7 +118,7 @@ static int __init percpu_test_init(void) CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); - CHECK(ul, ulong_counter, 0); + CHECK(ul, ulong_counter, 1); preempt_enable(); -- 1.8.4.1 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to 
majordomo@vger.kernel.org More majordomo info at
[PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
this_cpu_sub() is implemented as negation and addition. This patch casts the adjustment to the counter type before negation to sign extend the adjustment. This helps in cases where the counter type is wider than an unsigned adjustment. An alternative to this patch is to declare such operations unsupported, but it seemed useful to avoid surprises. This patch specifically helps the following example: unsigned int delta = 1 preempt_disable() this_cpu_write(long_counter, 0) this_cpu_sub(long_counter, delta) preempt_enable() Before this change long_counter on a 64 bit machine ends with value 0x, rather than 0x. This is because this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), which is basically: long_counter = 0 + 0x Also apply the same cast to: __this_cpu_sub() this_cpu_sub_return() and __this_cpu_sub_return() All percpu_test.ko passes, especially the following cases which previously failed: l -= ui_one; __this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); l -= ui_one; this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); CHECK(l, long_counter, 0x); ul -= ui_one; __this_cpu_sub(ulong_counter, ui_one); CHECK(ul, ulong_counter, -1); CHECK(ul, ulong_counter, 0x); ul = this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 1); Signed-off-by: Greg Thelen gthe...@google.com --- arch/x86/include/asm/percpu.h | 3 ++- include/linux/percpu.h| 8 lib/percpu_test.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0da5200..b3e18f8 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -128,7 +128,8 @@ do { \ do { \ typedef typeof(var) pao_T__;\ const int pao_ID__ = (__builtin_constant_p(val) \ - ((val) == 1 || (val) == -1)) ? 
(val) : 0; \ + ((val) == 1 || (val) == -1)) ?\ + (int)(val) : 0; \ if (0) {\ pao_T__ pao_tmp__; \ pao_tmp__ = (val); \ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index cc88172..c74088a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -332,7 +332,7 @@ do { \ #endif #ifndef this_cpu_sub -# define this_cpu_sub(pcp, val)this_cpu_add((pcp), -(val)) +# define this_cpu_sub(pcp, val)this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef this_cpu_inc @@ -418,7 +418,7 @@ do { \ # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) #endif -#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) @@ -586,7 +586,7 @@ do { \ #endif #ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) +# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef __this_cpu_inc @@ -668,7 +668,7 @@ do { \ __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) #endif -#define __this_cpu_sub_return(pcp, val)__this_cpu_add_return(pcp, -(val)) +#define __this_cpu_sub_return(pcp, val)__this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) diff --git a/lib/percpu_test.c b/lib/percpu_test.c index 1ebeb44..8ab4231 100644 --- a/lib/percpu_test.c +++ b/lib/percpu_test.c @@ -118,7 +118,7 @@ static int __init percpu_test_init(void) CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); - CHECK(ul, ulong_counter, 0); + CHECK(ul, ulong_counter, 1); preempt_enable(); -- 1.8.4.1 -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org 
More majordomo
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27, 2013 at 12:44:35AM -0700, Greg Thelen wrote: this_cpu_sub() is implemented as negation and addition. This patch casts the adjustment to the counter type before negation to sign extend the adjustment. This helps in cases where the counter type is wider than an unsigned adjustment. An alternative to this patch is to declare such operations unsupported, but it seemed useful to avoid surprises. This patch specifically helps the following example: unsigned int delta = 1 preempt_disable() this_cpu_write(long_counter, 0) this_cpu_sub(long_counter, delta) preempt_enable() Before this change long_counter on a 64 bit machine ends with value 0xffffffff, rather than 0xffffffffffffffff. This is because this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), which is basically: long_counter = 0 + 0xffffffff Also apply the same cast to: __this_cpu_sub() this_cpu_sub_return() and __this_cpu_sub_return() All percpu_test.ko passes, especially the following cases which previously failed: l -= ui_one; __this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); l -= ui_one; this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); CHECK(l, long_counter, 0xffffffffffffffff); ul -= ui_one; __this_cpu_sub(ulong_counter, ui_one); CHECK(ul, ulong_counter, -1); CHECK(ul, ulong_counter, 0xffffffffffffffff); ul = this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 1); Signed-off-by: Greg Thelen gthelen@google.com Ouch, nice catch. Acked-by: Tejun Heo tj@kernel.org We probably want to cc stable for this and the next one. How should these be routed? I can take these through percpu tree or mm works too. Either way, it'd be best to route them together. Thanks. -- tejun -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo t...@kernel.org wrote: We probably want to cc stable for this and the next one. How should these be routed? I can take these through percpu tree or mm works too. Either way, it'd be best to route them together. Yes, all three look like -stable material to me. I'll grab them later in the week if you haven't ;) The names of the first two patches distress me. They rather clearly assert that the code affects percpu_counter.[ch], but that is not the case. Massaging is needed to fix that up. -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27, 2013 at 05:04:29AM -0700, Andrew Morton wrote: On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo t...@kernel.org wrote: We probably want to cc stable for this and the next one. How should these be routed? I can take these through percpu tree or mm works too. Either way, it'd be best to route them together. Yes, all three look like -stable material to me. I'll grab them later in the week if you haven't ;) Tried to apply to percpu but the third one is a fix for a patch which was added to -mm during v3.12-rc1, so these are yours. :) The names of the first two patches distress me. They rather clearly assert that the code affects percpu_counter.[ch], but that is not the case. Massaging is needed to fix that up. Yeah, something like the following would be better percpu: add test module for various percpu operations percpu: fix this_cpu_sub() subtrahend casting for unsigneds memcg: use __this_cpu_sub() to dec stats to avoid incorrect subtrahend casting Thanks. -- tejun -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27 2013, Tejun Heo wrote: On Sun, Oct 27, 2013 at 05:04:29AM -0700, Andrew Morton wrote: On Sun, 27 Oct 2013 07:22:55 -0400 Tejun Heo t...@kernel.org wrote: We probably want to cc stable for this and the next one. How should these be routed? I can take these through percpu tree or mm works too. Either way, it'd be best to route them together. Yes, all three look like -stable material to me. I'll grab them later in the week if you haven't ;) Tried to apply to percpu but the third one is a fix for a patch which was added to -mm during v3.12-rc1, so these are yours. :) I don't object to stable for the first two non-memcg patches, but it's probably unnecessary. I should have made it more clear, but an audit of v3.12-rc6 shows that only new memcg code is affected - the new mem_cgroup_move_account_page_stat() is the only place where an unsigned adjustment is used. All other callers (e.g. shrink_dcache_sb) already use a signed adjustment, so no problems before v3.12. Though I did not audit the stable kernel trees, so there could be something hiding in there. The names of the first two patches distress me. They rather clearly assert that the code affects percpu_counter.[ch], but that is not the case. Massaging is needed to fix that up. Yeah, something like the following would be better percpu: add test module for various percpu operations percpu: fix this_cpu_sub() subtrahend casting for unsigneds memcg: use __this_cpu_sub() to dec stats to avoid incorrect subtrahend casting No objection to renaming. Let me know if you want these reposed with updated titles. -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 2/3] percpu counter: cast this_cpu_sub() adjustment
On Sun, Oct 27 2013, Greg Thelen wrote: this_cpu_sub() is implemented as negation and addition. This patch casts the adjustment to the counter type before negation to sign extend the adjustment. This helps in cases where the counter type is wider than an unsigned adjustment. An alternative to this patch is to declare such operations unsupported, but it seemed useful to avoid surprises. This patch specifically helps the following example: unsigned int delta = 1 preempt_disable() this_cpu_write(long_counter, 0) this_cpu_sub(long_counter, delta) preempt_enable() Before this change long_counter on a 64 bit machine ends with value 0x, rather than 0x. This is because this_cpu_sub(pcp, delta) boils down to this_cpu_add(pcp, -delta), which is basically: long_counter = 0 + 0x Also apply the same cast to: __this_cpu_sub() this_cpu_sub_return() and __this_cpu_sub_return() All percpu_test.ko passes, especially the following cases which previously failed: l -= ui_one; __this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); l -= ui_one; this_cpu_sub(long_counter, ui_one); CHECK(l, long_counter, -1); CHECK(l, long_counter, 0x); ul -= ui_one; __this_cpu_sub(ulong_counter, ui_one); CHECK(ul, ulong_counter, -1); CHECK(ul, ulong_counter, 0x); ul = this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); CHECK(ul, ulong_counter, 1); Signed-off-by: Greg Thelen gthe...@google.com --- arch/x86/include/asm/percpu.h | 3 ++- include/linux/percpu.h| 8 lib/percpu_test.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0da5200..b3e18f8 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -128,7 +128,8 @@ do { \ do { \ typedef typeof(var) pao_T__;\ const int pao_ID__ = (__builtin_constant_p(val) \ - ((val) == 1 || (val) == -1)) ? 
(val) : 0; \ + ((val) == 1 || (val) == -1)) ?\ + (int)(val) : 0; \ if (0) {\ pao_T__ pao_tmp__; \ pao_tmp__ = (val); \ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index cc88172..c74088a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -332,7 +332,7 @@ do { \ #endif #ifndef this_cpu_sub -# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) +# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef this_cpu_inc @@ -418,7 +418,7 @@ do { \ # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) #endif -#define this_cpu_sub_return(pcp, val)this_cpu_add_return(pcp, -(val)) +#define this_cpu_sub_return(pcp, val)this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) @@ -586,7 +586,7 @@ do { \ #endif #ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val)__this_cpu_add((pcp), -(val)) +# define __this_cpu_sub(pcp, val)__this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef __this_cpu_inc @@ -668,7 +668,7 @@ do { \ __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) #endif -#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) diff --git a/lib/percpu_test.c b/lib/percpu_test.c index 1ebeb44..8ab4231 100644 --- a/lib/percpu_test.c +++ b/lib/percpu_test.c @@ -118,7 +118,7 @@ static int __init percpu_test_init(void) CHECK(ul, ulong_counter, 2); ul = __this_cpu_sub_return(ulong_counter, ui_one); - CHECK(ul, ulong_counter, 0); + CHECK(ul, ulong_counter, 1); preempt_enable(); Oops. This update to percpu_test.c