On Tue, Nov 12, 2013 at 10:15:41AM -0500, Vivek Goyal wrote:
> I see that we allocate per cpu stats but don't do any initializations.
>
> static void tg_stats_alloc_fn(struct work_struct *work)
> {
> 	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
> 	struct delayed_work *dwork = to_delayed_work(work);
> 	bool empty = false;
>
> alloc_stats:
> 	if (!stats_cpu) {
> 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
> 		if (!stats_cpu) {
> 			/* allocation failed, try again after some time */
> 			schedule_delayed_work(dwork, msecs_to_jiffies(10));
> 			return;
> 		}
> 	}
>
> 	spin_lock_irq(&tg_stats_alloc_lock);
Absolutely! Something like this perhaps? Did I miss more blkg_[rw]stats?
If I read the git grep output right, this was the last one.

---
 block/blk-throttle.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8331aba9426f..fd743d98c41d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -256,6 +256,12 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 	}								\
 } while (0)
 
+static void tg_stats_init(struct tg_stats_cpu *tg_stats)
+{
+	blkg_rwstat_init(&tg_stats->service_bytes);
+	blkg_rwstat_init(&tg_stats->serviced);
+}
+
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -269,12 +275,16 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 
 alloc_stats:
 	if (!stats_cpu) {
+		int cpu;
+
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
 			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
+		for_each_possible_cpu(cpu)
+			tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
 	}
 
 	spin_lock_irq(&tg_stats_alloc_lock);
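
For background on why the explicit init matters even though alloc_percpu()
hands back zeroed memory: blkg_rwstat embeds a u64_stats_sync, which on
32-bit SMP is backed by a seqcount and, with lockdep enabled, needs a real
seqcount init (done via u64_stats_init()) before first use. Below is a
minimal, self-contained sketch of the same allocate-then-initialize
pattern; the foo_stats / foo_stats_init / foo_stats_alloc names are made up
for illustration and merely stand in for tg_stats_cpu and tg_stats_init()
in the patch above.

/*
 * Hypothetical sketch of the alloc-then-init per-cpu pattern.
 * alloc_percpu() zeroes the memory but runs no constructors, so any
 * embedded u64_stats_sync still needs an explicit init on every
 * possible CPU before the stats are read or written.
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
	struct u64_stats_sync	syncp;
	u64			bytes;
	u64			ios;
};

static void foo_stats_init(struct foo_stats *stats)
{
	/* zeroed counters are fine; the seqcount is what needs init */
	u64_stats_init(&stats->syncp);
}

static struct foo_stats __percpu *foo_stats_alloc(void)
{
	struct foo_stats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct foo_stats);
	if (!stats)
		return NULL;

	/* walk every possible CPU, not just the online ones */
	for_each_possible_cpu(cpu)
		foo_stats_init(per_cpu_ptr(stats, cpu));

	return stats;
}

Iterating with for_each_possible_cpu() rather than over online CPUs follows
the usual kernel convention, so a CPU that is hotplugged later already finds
its per-cpu instance initialized.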