On 2021-03-15 17:20, Avri Altman wrote:
> +
> +             if (hpb->is_hcm) {
> +                     spin_lock_irqsave(&rgn->rgn_lock, flags);

rgn_lock is never used in IRQ contexts, so there is no need for the
irqsave and irqrestore variants everywhere, which can impact
performance. Please correct me if I am wrong.
Thanks.  Will do.
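
For reference, dropping the irqsave/irqrestore variants for the quoted
hunk would amount to something like the sketch below (names taken from
the patch; this assumes rgn_lock really is never taken from IRQ
context, as suggested above):

		if (hpb->is_hcm) {
			/* plain lock: rgn->reads is not touched from IRQ context */
			spin_lock(&rgn->rgn_lock);
			rgn->reads = 0;
			spin_unlock(&rgn->rgn_lock);
		}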


Meanwhile, have you ever initialized the rgn_lock before using it?
Yep - forgot to do that here (but not in gs20 and mi10).  Thanks.
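
For illustration, the missing piece would be a spin_lock_init() on each
region's lock before it is first used, somewhere in the region table
setup. A hypothetical placement only, reusing field names from the
patch:

	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;

		/*
		 * Hypothetical spot: the per-region lock must be
		 * initialized before any reads accounting happens
		 * in host control mode.
		 */
		if (hpb->is_hcm)
			spin_lock_init(&rgn->rgn_lock);
	}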

You mean you didn't test this specific series before uploading it?
I haven't moved to the test stage yet, but this will definitely
cause an error...

Can Guo.


Thanks,
Avri


Thanks,
Can Guo.

> +                     rgn->reads = 0;
> +                     spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> +             }
> +
>               return 0;
>       }
>
>       if (!ufshpb_is_support_chunk(hpb, transfer_len))
>               return 0;
>
> +     if (hpb->is_hcm) {
> +             bool activate = false;
> +             /*
> +              * in host control mode, reads are the main source for
> +              * activation trials.
> +              */
> +             spin_lock_irqsave(&rgn->rgn_lock, flags);
> +             rgn->reads++;
> +             if (rgn->reads == ACTIVATION_THRESHOLD)
> +                     activate = true;
> +             spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> +             if (activate) {
> +                     spin_lock_irqsave(&hpb->rsp_list_lock, flags);
> +                     ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> +                     hpb->stats.rb_active_cnt++;
> +                     spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
> +                     dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
> +                             "activate region %d-%d\n", rgn_idx, srgn_idx);
> +             }
> +
> +             /* keep those counters normalized */
> +             if (rgn->reads > hpb->entries_per_srgn)
> +                     schedule_work(&hpb->ufshpb_normalization_work);
> +     }
> +
>       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
>       if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
>                                  transfer_len)) {
> @@ -745,21 +794,6 @@ static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
>       return 0;
>  }
>
> -static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
> -                                   int srgn_idx)
> -{
> -     struct ufshpb_region *rgn;
> -     struct ufshpb_subregion *srgn;
> -
> -     rgn = hpb->rgn_tbl + rgn_idx;
> -     srgn = rgn->srgn_tbl + srgn_idx;
> -
> -     list_del_init(&rgn->list_inact_rgn);
> -
> -     if (list_empty(&srgn->list_act_srgn))
> -             list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
> -}
> -
>  static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
>  {
>       struct ufshpb_region *rgn;
> @@ -1079,6 +1113,14 @@ static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
>
>       ufshpb_cleanup_lru_info(lru_info, rgn);
>
> +     if (hpb->is_hcm) {
> +             unsigned long flags;
> +
> +             spin_lock_irqsave(&rgn->rgn_lock, flags);
> +             rgn->reads = 0;
> +             spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> +     }
> +
>       for_each_sub_region(rgn, srgn_idx, srgn)
>               ufshpb_purge_active_subregion(hpb, srgn);
>  }
> @@ -1523,6 +1565,31 @@ static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
>       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
>  }
>
> +static void ufshpb_normalization_work_handler(struct work_struct *work)
> +{
> +     struct ufshpb_lu *hpb;
> +     int rgn_idx;
> +     unsigned long flags;
> +
> +     hpb = container_of(work, struct ufshpb_lu, ufshpb_normalization_work);
> +
> +     for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
> +             struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
> +
> +             spin_lock_irqsave(&rgn->rgn_lock, flags);
> +             rgn->reads = (rgn->reads >> 1);
> +             spin_unlock_irqrestore(&rgn->rgn_lock, flags);
> +
> +             if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
> +                     continue;
> +
> +             /* if region is active but has no reads - inactivate it */
> +             spin_lock(&hpb->rsp_list_lock);
> +             ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
> +             spin_unlock(&hpb->rsp_list_lock);
> +     }
> +}
> +
>  static void ufshpb_map_work_handler(struct work_struct *work)
>  {
>       struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
> @@ -1913,6 +1980,9 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
>       INIT_LIST_HEAD(&hpb->list_hpb_lu);
>
>       INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
> +     if (hpb->is_hcm)
> +             INIT_WORK(&hpb->ufshpb_normalization_work,
> +                       ufshpb_normalization_work_handler);
>
>       hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
>                         sizeof(struct ufshpb_req), 0, 0, NULL);
> @@ -2012,6 +2082,8 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
>
>  static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
>  {
> +     if (hpb->is_hcm)
> +             cancel_work_sync(&hpb->ufshpb_normalization_work);
>       cancel_work_sync(&hpb->map_work);
>  }
>
> diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
> index 8119b1a3d1e5..bd4308010466 100644
> --- a/drivers/scsi/ufs/ufshpb.h
> +++ b/drivers/scsi/ufs/ufshpb.h
> @@ -121,6 +121,10 @@ struct ufshpb_region {
>       struct list_head list_lru_rgn;
>       unsigned long rgn_flags;
>  #define RGN_FLAG_DIRTY 0
> +
> +     /* region reads - for host mode */
> +     spinlock_t rgn_lock;
> +     unsigned int reads;
>  };
>
>  #define for_each_sub_region(rgn, i, srgn)                            \
> @@ -211,6 +215,7 @@ struct ufshpb_lu {
>
>       /* for selecting victim */
>       struct victim_select_info lru_info;
> +     struct work_struct ufshpb_normalization_work;
>
>       /* pinned region information */
>       u32 lu_pinned_start;
