RE: [PATCH v6 03/10] scsi: ufshpb: Add region's reads counter

2021-03-27 Thread Avri Altman
> > > @@ -596,12 +615,43 @@ int ufshpb_prep(struct ufs_hba *hba, struct
> > > ufshcd_lrb *lrbp)
> > >   ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
> > >transfer_len);
> > >   spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> > > +
> > > + if (hpb->is_hcm) {
> > > + spin_lock(&rgn->rgn_lock);
> > > + rgn->reads = 0;
> > > + spin_unlock(&rgn->rgn_lock);
> Here also.
> 
> > > + }
> > > +
> > >   return 0;
> > >   }
> > >
> > >   if (!ufshpb_is_support_chunk(hpb, transfer_len))
> > >   return 0;
> > >
> > > + if (hpb->is_hcm) {
> > > + bool activate = false;
> > > + /*
> > > +  * in host control mode, reads are the main source for
> > > +  * activation trials.
> > > +  */
> > > + spin_lock(&rgn->rgn_lock);
> > > + rgn->reads++;
> > > + if (rgn->reads == ACTIVATION_THRESHOLD)
> > > + activate = true;
> > > + spin_unlock(&rgn->rgn_lock);
> > > + if (activate) {
> > > + spin_lock_irqsave(&hpb->rsp_list_lock, flags);
> > > + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> >
> > If a transfer_len (possible with HPB2.0) sits across two
> > regions/sub-regions,
> > here it only updates active info of the first region/sub-region.
> Yes.  Will fix.
Giving it another look, I noticed that the current design only supports a single 
subregion per region.
As activation is done per subregion, we need to count reads per subregion and 
make those activation decisions accordingly. 
Still, the read counter per region needs to stay, as well as its spinlock for 
the inactivation decision.

Will fix it in my next version.  Waiting for v32.

Thanks,
Avri

> 
> Thanks,
> Avri
> 
> >
> > Thanks,
> > Can Guo.
> >


RE: [PATCH v6 03/10] scsi: ufshpb: Add region's reads counter

2021-03-24 Thread Avri Altman
> > @@ -596,12 +615,43 @@ int ufshpb_prep(struct ufs_hba *hba, struct
> > ufshcd_lrb *lrbp)
> >   ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
> >transfer_len);
> >   spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> > +
> > + if (hpb->is_hcm) {
> > + spin_lock(&rgn->rgn_lock);
> > + rgn->reads = 0;
> > + spin_unlock(&rgn->rgn_lock);
Here also.

> > + }
> > +
> >   return 0;
> >   }
> >
> >   if (!ufshpb_is_support_chunk(hpb, transfer_len))
> >   return 0;
> >
> > + if (hpb->is_hcm) {
> > + bool activate = false;
> > + /*
> > +  * in host control mode, reads are the main source for
> > +  * activation trials.
> > +  */
> > + spin_lock(&rgn->rgn_lock);
> > + rgn->reads++;
> > + if (rgn->reads == ACTIVATION_THRESHOLD)
> > + activate = true;
> > + spin_unlock(&rgn->rgn_lock);
> > + if (activate) {
> > + spin_lock_irqsave(&hpb->rsp_list_lock, flags);
> > + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> 
> If a transfer_len (possible with HPB2.0) sits across two
> regions/sub-regions,
> here it only updates active info of the first region/sub-region.
Yes.  Will fix.

Thanks,
Avri

> 
> Thanks,
> Can Guo.
> 


Re: [PATCH v6 03/10] scsi: ufshpb: Add region's reads counter

2021-03-24 Thread Can Guo

On 2021-03-22 16:10, Avri Altman wrote:

In host control mode, reads are the major source of activation trials.
Keep track of those reads counters, for both active as well as inactive
regions.

We reset the read counter upon write - we are only interested in 
"clean"

reads.

Keep those counters normalized, as we are using those reads as a
comparative score, to make various decisions.
If during consecutive normalizations an active region has exhausted its
reads - inactivate it.

while at it, protect the {active,inactive}_count stats by adding them
into the applicable handler.

Signed-off-by: Avri Altman 
---
 drivers/scsi/ufs/ufshpb.c | 100 +++---
 drivers/scsi/ufs/ufshpb.h |   5 ++
 2 files changed, 88 insertions(+), 17 deletions(-)

diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index d4f0bb6d8fa1..a1519cbb4ce0 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -16,6 +16,8 @@
 #include "ufshpb.h"
 #include "../sd.h"

+#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
+
 /* memory management */
 static struct kmem_cache *ufshpb_mctx_cache;
 static mempool_t *ufshpb_mctx_pool;
@@ -546,6 +548,23 @@ static int ufshpb_issue_pre_req(struct ufshpb_lu
*hpb, struct scsi_cmnd *cmd,
return ret;
 }

+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int 
rgn_idx,

+ int srgn_idx)
+{
+   struct ufshpb_region *rgn;
+   struct ufshpb_subregion *srgn;
+
+   rgn = hpb->rgn_tbl + rgn_idx;
+   srgn = rgn->srgn_tbl + srgn_idx;
+
+   list_del_init(&rgn->list_inact_rgn);
+
+   if (list_empty(&srgn->list_act_srgn))
+   list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+
+   hpb->stats.rb_active_cnt++;
+}
+
 /*
  * This function will set up HPB read command using host-side L2P map 
data.

  */
@@ -596,12 +615,43 @@ int ufshpb_prep(struct ufs_hba *hba, struct
ufshcd_lrb *lrbp)
ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
 transfer_len);
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+   if (hpb->is_hcm) {
+   spin_lock(&rgn->rgn_lock);
+   rgn->reads = 0;
+   spin_unlock(&rgn->rgn_lock);
+   }
+
return 0;
}

if (!ufshpb_is_support_chunk(hpb, transfer_len))
return 0;

+   if (hpb->is_hcm) {
+   bool activate = false;
+   /*
+* in host control mode, reads are the main source for
+* activation trials.
+*/
+   spin_lock(&rgn->rgn_lock);
+   rgn->reads++;
+   if (rgn->reads == ACTIVATION_THRESHOLD)
+   activate = true;
+   spin_unlock(&rgn->rgn_lock);
+   if (activate) {
+   spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+   ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);


If a transfer_len (possible with HPB2.0) sits across two 
regions/sub-regions,

here it only updates active info of the first region/sub-region.

Thanks,
Can Guo.


+   spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+   dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+   "activate region %d-%d\n", rgn_idx, srgn_idx);
+   }
+
+   /* keep those counters normalized */
+   if (rgn->reads > hpb->entries_per_srgn)
+   schedule_work(&hpb->ufshpb_normalization_work);
+   }
+
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
   transfer_len)) {
@@ -741,21 +791,6 @@ static int ufshpb_clear_dirty_bitmap(struct 
ufshpb_lu *hpb,

return 0;
 }

-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int 
rgn_idx,

- int srgn_idx)
-{
-   struct ufshpb_region *rgn;
-   struct ufshpb_subregion *srgn;
-
-   rgn = hpb->rgn_tbl + rgn_idx;
-   srgn = rgn->srgn_tbl + srgn_idx;
-
-   list_del_init(&rgn->list_inact_rgn);
-
-   if (list_empty(&srgn->list_act_srgn))
-   list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-}
-
 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int 
rgn_idx)

 {
struct ufshpb_region *rgn;
@@ -769,6 +804,8 @@ static void ufshpb_update_inactive_info(struct
ufshpb_lu *hpb, int rgn_idx)

if (list_empty(&rgn->list_inact_rgn))
list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
+
+   hpb->stats.rb_inactive_cnt++;
 }

 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
@@ -1089,6 +1126,7 @@ static int ufshpb_evict_region(struct ufshpb_lu
*hpb, struct ufshpb_region *rgn)
 rgn->rgn_idx);