RE: [RFC PATCH v3 4/5] scsi: ufs: L2P map management for HPB read

2020-06-24 Thread Daejun Park
>  static struct ufshpb_driver ufshpb_drv;
> > +unsigned int ufshpb_host_map_kbytes = 1 * 1024;
> I think you've already declared this as a module parameter in 3/5.
> 
> No need to fix this now, unless there will be some more comments,
> And you'll issue a v4.

OK, thanks!


RE: [RFC PATCH v3 4/5] scsi: ufs: L2P map management for HPB read

2020-06-24 Thread Avri Altman
>  static struct ufshpb_driver ufshpb_drv;
> +unsigned int ufshpb_host_map_kbytes = 1 * 1024;
I think you've already declared this as a module parameter in 3/5.

No need to fix this now, unless there will be some more comments,
And you'll issue a v4.

Thanks,
Avri 


[RFC PATCH v3 4/5] scsi: ufs: L2P map management for HPB read

2020-06-22 Thread Daejun Park
This is a patch for managing L2P map in HPB module.

The HPB divides logical addresses into several regions. A region consists
of several sub-regions. The sub-region is a basic unit where L2P mapping is
managed. The driver loads L2P mapping data of each sub-region. The loaded
sub-region is called active-state. The HPB driver unloads L2P mapping data
as region unit. The unloaded region is called inactive-state.

Sub-region/region candidates to be loaded and unloaded are delivered from
the UFS device. The UFS device delivers the recommended active sub-region
and inactive region to the driver using sense data.
The HPB module performs L2P mapping management on the host through the
delivered information.

A pinned region is a pre-set region on the UFS device that is always in
active-state.

The data structure for map data request and L2P map uses mempool API,
minimizing allocation overhead while avoiding static allocation.

The map_work manages active/inactive by 2 "to-do" lists.
Each hpb lun maintains 2 "to-do" lists:
  hpb->lh_inact_rgn - regions to be inactivated, and
  hpb->lh_act_srgn - subregions to be activated
Those lists are maintained on IO completion.

Signed-off-by: Daejun Park 
---
 drivers/scsi/ufs/ufshpb.c | 992 +-
 drivers/scsi/ufs/ufshpb.h |  72 +++
 2 files changed, 1060 insertions(+), 4 deletions(-)

diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 84a9af2fdc80..e1af4c2ed9ab 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -15,6 +15,7 @@
 #include "ufshpb.h"
 
 static struct ufshpb_driver ufshpb_drv;
+unsigned int ufshpb_host_map_kbytes = 1 * 1024;
 
 static int ufshpb_create_sysfs(struct ufs_hba *hba, struct ufshpb_lu *hpb);
 
@@ -25,6 +26,63 @@ static inline int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
srgn->srgn_state == HPB_SRGN_VALID;
 }
 
+static inline bool ufshpb_is_general_lun(int lun)
+{
+   return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+static inline bool
+ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
+{
+   if (hpb->lu_pinned_end != PINNED_NOT_SET &&
+   rgn_idx >= hpb->lu_pinned_start &&
+   rgn_idx <= hpb->lu_pinned_end)
+   return true;
+
+   return false;
+}
+
+static bool ufshpb_is_empty_rsp_lists(struct ufshpb_lu *hpb)
+{
+   bool ret = true;
+   unsigned long flags;
+
+   spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+   if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
+   ret = false;
+   spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+   return ret;
+}
+
+static inline int ufshpb_may_field_valid(struct ufs_hba *hba,
+struct ufshcd_lrb *lrbp,
+struct ufshpb_rsp_field *rsp_field)
+{
+   if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
+   rsp_field->desc_type != DEV_DES_TYPE ||
+   rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
+   rsp_field->hpb_type == HPB_RSP_NONE ||
+   rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
+   rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
+   (!rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
+   return -EINVAL;
+
+   if (!ufshpb_is_general_lun(lrbp->lun)) {
+   dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
+lrbp->lun);
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+
+static inline struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_cmnd *cmd)
+{
+   return cmd->device->hostdata;
+}
+
 static inline int ufshpb_get_state(struct ufshpb_lu *hpb)
 {
 return atomic_read(&hpb->hpb_state);
@@ -59,6 +117,765 @@ static inline void ufshpb_lu_put(struct ufshpb_lu *hpb)
 put_device(&hpb->hpb_lu_dev);
 }
 
+static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
+struct ufshpb_subregion *srgn)
+{
+   struct ufshpb_req *map_req;
+   struct request *req;
+   struct bio *bio;
+
+   map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
+   if (!map_req)
+   return NULL;
+
+   req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
+ REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+   if (IS_ERR(req))
+   goto free_map_req;
+
+   bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+   if (!bio) {
+   blk_put_request(req);
+   goto free_map_req;
+   }
+
+   map_req->hpb = hpb;
+   map_req->req = req;
+   map_req->bio = bio;
+
+   map_req->rgn_idx = srgn->rgn_idx;
+   map_req->srgn_idx = srgn->srgn_idx;
+   map_req->mctx = srgn->mctx;
+   map_req->lun = hpb->lun;
+
+   return map_req;
+
+free_map_req:
+   kmem_cache_free(hpb->map_req_cache, map_req);
+   return NULL;
+}
+
+static inline void