Re: [PATCH V5 03/17] blk-throttle: add .low interface

2017-01-09 Thread Tejun Heo
Happy new year, Shaohua.

Sorry about the long delay.

On Thu, Dec 15, 2016 at 12:32:54PM -0800, Shaohua Li wrote:
> Add low limit for cgroup and corresponding cgroup interface.

It'd be nice to explain why we're adding separate _conf fields.

> +static void blk_throtl_update_valid_limit(struct throtl_data *td)

I think blk_throtl_update_limit_valid() would be more consistent.

> +{
> + struct cgroup_subsys_state *pos_css;
> + struct blkcg_gq *blkg;
> + bool low_valid = false;
> +
> + rcu_read_lock();
> + blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
> + struct throtl_grp *tg = blkg_to_tg(blkg);
> +
> + if (tg->bps[READ][LIMIT_LOW] != U64_MAX ||
> + tg->bps[WRITE][LIMIT_LOW] != U64_MAX ||
> + tg->iops[READ][LIMIT_LOW] != UINT_MAX ||
> + tg->iops[WRITE][LIMIT_LOW] != UINT_MAX)
> + low_valid = true;

It's weird that it's defaulting to MAX.  Shouldn't it default to 0?
When we enable these limits on a cgroup, we want it to not affect the
operation without further configuration.  For max limit, MAX does that
as being over the limit is what changes the behavior.  For low, it's
the other way around.  We enforce latency target if cgroups are under
the low limit, and thus 0 should be the noop default value, which is
the same in memcg.

> + }
> + rcu_read_unlock();
> +
> + if (low_valid)
> + td->limit_valid[LIMIT_LOW] = true;
> + else
> + td->limit_valid[LIMIT_LOW] = false;

Maybe
td->limit_valid[LIMIT_LOW] = low_valid;

Thanks.

-- 
tejun
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH V5 03/17] blk-throttle: add .low interface

2016-12-15 Thread Shaohua Li
Add low limit for cgroup and corresponding cgroup interface.

Signed-off-by: Shaohua Li <shli@fb.com>
---
 block/blk-throttle.c | 134 ---
 1 file changed, 106 insertions(+), 28 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a75bfa2..fcc4199 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -84,6 +84,7 @@ enum tg_state_flags {
 #define rb_entry_tg(node)  rb_entry((node), struct throtl_grp, rb_node)
 
 enum {
+   LIMIT_LOW,
LIMIT_MAX,
LIMIT_CNT,
 };
@@ -124,11 +125,15 @@ struct throtl_grp {
/* are there any throtl rules between this group and td? */
bool has_rules[2];
 
-   /* bytes per second rate limits */
+   /* internally used bytes per second rate limits */
uint64_t bps[2][LIMIT_CNT];
+   /* user configured bps limits */
+   uint64_t bps_conf[2][LIMIT_CNT];
 
-   /* IOPS limits */
+   /* internally used IOPS limits */
unsigned int iops[2][LIMIT_CNT];
+   /* user configured IOPS limits */
+   unsigned int iops_conf[2][LIMIT_CNT];
 
	/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
@@ -355,6 +360,8 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, 
int node)
for (index = 0; index < LIMIT_CNT; index++) {
tg->bps[rw][index] = U64_MAX;
tg->iops[rw][index] = UINT_MAX;
+   tg->bps_conf[rw][index] = U64_MAX;
+   tg->iops_conf[rw][index] = UINT_MAX;
}
}
 
@@ -414,6 +421,46 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
tg_update_has_rules(pd_to_tg(pd));
 }
 
+static void blk_throtl_update_valid_limit(struct throtl_data *td)
+{
+   struct cgroup_subsys_state *pos_css;
+   struct blkcg_gq *blkg;
+   bool low_valid = false;
+
+   rcu_read_lock();
+   blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
+   struct throtl_grp *tg = blkg_to_tg(blkg);
+
+   if (tg->bps[READ][LIMIT_LOW] != U64_MAX ||
+   tg->bps[WRITE][LIMIT_LOW] != U64_MAX ||
+   tg->iops[READ][LIMIT_LOW] != UINT_MAX ||
+   tg->iops[WRITE][LIMIT_LOW] != UINT_MAX)
+   low_valid = true;
+   }
+   rcu_read_unlock();
+
+   if (low_valid)
+   td->limit_valid[LIMIT_LOW] = true;
+   else
+   td->limit_valid[LIMIT_LOW] = false;
+}
+
+static void throtl_pd_offline(struct blkg_policy_data *pd)
+{
+   struct throtl_grp *tg = pd_to_tg(pd);
+
+   tg->bps[READ][LIMIT_LOW] = U64_MAX;
+   tg->bps[WRITE][LIMIT_LOW] = U64_MAX;
+   tg->iops[READ][LIMIT_LOW] = UINT_MAX;
+   tg->iops[WRITE][LIMIT_LOW] = UINT_MAX;
+
+   blk_throtl_update_valid_limit(tg->td);
+
+   if (tg->td->limit_index == LIMIT_LOW &&
+   !tg->td->limit_valid[LIMIT_LOW])
+   tg->td->limit_index = LIMIT_MAX;
+}
+
 static void throtl_pd_free(struct blkg_policy_data *pd)
 {
struct throtl_grp *tg = pd_to_tg(pd);
@@ -1284,7 +1331,7 @@ static struct cftype throtl_legacy_files[] = {
{ } /* terminate */
 };
 
-static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
+static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
 int off)
 {
struct throtl_grp *tg = pd_to_tg(pd);
@@ -1294,38 +1341,38 @@ static u64 tg_prfill_max(struct seq_file *sf, struct 
blkg_policy_data *pd,
if (!dname)
return 0;
 
-   if (tg->bps[READ][LIMIT_MAX] == U64_MAX &&
-   tg->bps[WRITE][LIMIT_MAX] == U64_MAX &&
-   tg->iops[READ][LIMIT_MAX] == UINT_MAX &&
-   tg->iops[WRITE][LIMIT_MAX] == UINT_MAX)
+   if (tg->bps_conf[READ][off] == U64_MAX &&
+   tg->bps_conf[WRITE][off] == U64_MAX &&
+   tg->iops_conf[READ][off] == UINT_MAX &&
+   tg->iops_conf[WRITE][off] == UINT_MAX)
return 0;
 
-   if (tg->bps[READ][LIMIT_MAX] != U64_MAX)
+   if (tg->bps_conf[READ][off] != U64_MAX)
snprintf(bufs[0], sizeof(bufs[0]), "%llu",
-   tg->bps[READ][LIMIT_MAX]);
-   if (tg->bps[WRITE][LIMIT_MAX] != U64_MAX)
+   tg->bps_conf[READ][off]);
+   if (tg->bps_conf[WRITE][off] != U64_MAX)
snprintf(bufs[1], sizeof(bufs[1]), "%llu",
-   tg->bps[WRITE][LIMIT_MAX]);
-   if (tg->iops[READ][LIMIT_MAX] != UINT_MAX)
+   tg->bps_conf[WRITE][off]);
+   if (tg->iops_conf[READ][off] != UINT_MAX)
snprintf(bufs[2], sizeof(bufs[2]), "%u",
-   tg->iops[READ][LIMIT_MAX]);
-   if (tg->iops[WRITE][LIMIT_MAX] != UINT_MAX)
+   tg->iops_conf[READ][off]);
+   if (tg->iops_conf[WRITE][off] != UINT_MAX)
snprintf(bufs[3], siz