Re: [Freedreno] [PATCH 09/25] drm/msm/dpu: make RM iterator static

2018-10-10 Thread Sean Paul
On Mon, Oct 08, 2018 at 09:27:26PM -0700, Jeykumar Sankaran wrote:
> HW blocks reserved for a display are stored in crtc state.
> No one outside RM is interested in using these APIs for
> HW block list iterations.
> 
> Signed-off-by: Jeykumar Sankaran 

Reviewed-by: Sean Paul 

> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 37 ++-
>  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 46 
> --
>  2 files changed, 20 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> index 619b596..24fc1c7 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> @@ -49,12 +49,26 @@ struct dpu_rm_hw_blk {
>   struct dpu_hw_blk *hw;
>  };
>  
> +/**
> + * struct dpu_rm_hw_iter - iterator for use with dpu_rm
> + * @hw: dpu_hw object requested, or NULL on failure
> + * @blk: dpu_rm internal block representation. Clients ignore. Used as 
> iterator.
> + * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any 
> Encoder
> + * @type: Hardware Block Type client wishes to search for.
> + */
> +struct dpu_rm_hw_iter {
> + void *hw;
> + struct dpu_rm_hw_blk *blk;
> + uint32_t enc_id;
> + enum dpu_hw_blk_type type;
> +};
> +
>  struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
>  {
>   return rm->hw_mdp;
>  }
>  
> -void dpu_rm_init_hw_iter(
> +static void _dpu_rm_init_hw_iter(
>   struct dpu_rm_hw_iter *iter,
>   uint32_t enc_id,
>   enum dpu_hw_blk_type type)
> @@ -97,17 +111,6 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, 
> struct dpu_rm_hw_iter *i)
>   return false;
>  }
>  
> -bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
> -{
> - bool ret;
> -
> - mutex_lock(&rm->rm_lock);
> - ret = _dpu_rm_get_hw_locked(rm, i);
> - mutex_unlock(&rm->rm_lock);
> -
> - return ret;
> -}
> -
>  static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
>  {
>   switch (type) {
> @@ -365,7 +368,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
>   return false;
>   }
>  
> - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
> + _dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
>   while (_dpu_rm_get_hw_locked(rm, &iter)) {
>   if (iter.blk->id == lm_cfg->pingpong) {
>   *pp = iter.blk;
> @@ -404,7 +407,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 
> uint32_t enc_id,
>   }
>  
>   /* Find a primary mixer */
> - dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
> + _dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
>   while (lm_count != reqs->topology.num_lm &&
>   _dpu_rm_get_hw_locked(rm, &iter_i)) {
>   memset(&lm, 0, sizeof(lm));
> @@ -421,7 +424,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 
> uint32_t enc_id,
>   ++lm_count;
>  
>   /* Valid primary mixer found, find matching peers */
> - dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
> + _dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
>  
>   while (lm_count != reqs->topology.num_lm &&
>   _dpu_rm_get_hw_locked(rm, &iter_j)) {
> @@ -480,7 +483,7 @@ static int _dpu_rm_reserve_ctls(
>  
>   needs_split_display = _dpu_rm_needs_split_display(top);
>  
> - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
> + _dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
>   while (_dpu_rm_get_hw_locked(rm, &iter)) {
>   const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
>   unsigned long features = ctl->caps->features;
> @@ -528,7 +531,7 @@ static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
>   struct dpu_rm_hw_iter iter;
>  
>   /* Find the block entry in the rm, and note the reservation */
> - dpu_rm_init_hw_iter(&iter, 0, type);
> + _dpu_rm_init_hw_iter(&iter, 0, type);
>   while (_dpu_rm_get_hw_locked(rm, &iter)) {
>   if (iter.blk->id != id)
>   continue;
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h 
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
> index e48e8f2..c7e3b2b 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
> @@ -36,26 +36,6 @@ struct dpu_rm {
>  };
>  
>  /**
> - *  struct dpu_rm_hw_blk - resource manager internal structure
> - *   forward declaration for single iterator definition without void pointer
> - */
> -struct dpu_rm_hw_blk;
> -
> -/**
> - * struct dpu_rm_hw_iter - iterator for use with dpu_rm
> - * @hw: dpu_hw object requested, or NULL on failure
> - * @blk: dpu_rm internal block representation. Clients ignore. Used as 
> iterator.
> - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any 
> Encoder
> - * @type: Hardware Block Type client wishes to search for.
> - */
> -struct dpu_rm_hw_iter {
> - void *hw;
> - struct dpu_rm_hw_blk *blk;
> - 

[Freedreno] [PATCH 09/25] drm/msm/dpu: make RM iterator static

2018-10-08 Thread Jeykumar Sankaran
HW blocks reserved for a display are stored in crtc state.
No one outside RM is interested in using these APIs for
HW block list iterations.

Signed-off-by: Jeykumar Sankaran 
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 37 ++-
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 46 --
 2 files changed, 20 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 619b596..24fc1c7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -49,12 +49,26 @@ struct dpu_rm_hw_blk {
struct dpu_hw_blk *hw;
 };
 
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as 
iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+   void *hw;
+   struct dpu_rm_hw_blk *blk;
+   uint32_t enc_id;
+   enum dpu_hw_blk_type type;
+};
+
 struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
 {
return rm->hw_mdp;
 }
 
-void dpu_rm_init_hw_iter(
+static void _dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
enum dpu_hw_blk_type type)
@@ -97,17 +111,6 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct 
dpu_rm_hw_iter *i)
return false;
 }
 
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
-   bool ret;
-
-   mutex_lock(&rm->rm_lock);
-   ret = _dpu_rm_get_hw_locked(rm, i);
-   mutex_unlock(&rm->rm_lock);
-
-   return ret;
-}
-
 static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
 {
switch (type) {
@@ -365,7 +368,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return false;
}
 
-   dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+   _dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
@@ -404,7 +407,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t 
enc_id,
}
 
/* Find a primary mixer */
-   dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+   _dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
	while (lm_count != reqs->topology.num_lm &&
	_dpu_rm_get_hw_locked(rm, &iter_i)) {
	memset(&lm, 0, sizeof(lm));
@@ -421,7 +424,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t 
enc_id,
++lm_count;
 
/* Valid primary mixer found, find matching peers */
-   dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+   _dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
 
	while (lm_count != reqs->topology.num_lm &&
	_dpu_rm_get_hw_locked(rm, &iter_j)) {
@@ -480,7 +483,7 @@ static int _dpu_rm_reserve_ctls(
 
needs_split_display = _dpu_rm_needs_split_display(top);
 
-   dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+   _dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
unsigned long features = ctl->caps->features;
@@ -528,7 +531,7 @@ static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
struct dpu_rm_hw_iter iter;
 
/* Find the block entry in the rm, and note the reservation */
-   dpu_rm_init_hw_iter(&iter, 0, type);
+   _dpu_rm_init_hw_iter(&iter, 0, type);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index e48e8f2..c7e3b2b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -36,26 +36,6 @@ struct dpu_rm {
 };
 
 /**
- *  struct dpu_rm_hw_blk - resource manager internal structure
- * forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as 
iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
-   void *hw;
-   struct dpu_rm_hw_blk *blk;
-   uint32_t enc_id;
-   enum dpu_hw_blk_type type;
-};
-
-/**
  * dpu_rm_init - Read hardware catalog and create reservation tracking objects
  * for all HW blocks.
  * @rm: DPU Resource Manager handle
@@ -110,30 +90,4 @@ int dpu_rm_reserve(struct dpu_rm *rm,
  *