Re: [PATCH 2/4] drm/scheduler: move entity handling into separate file

2018-08-14 Thread Huang Rui
On Tue, Aug 14, 2018 at 10:12:24AM +0200, Christian König wrote:
> This is complex enough on its own. Move it into a separate C file.
> 
> Signed-off-by: Christian König 

For series:
Reviewed-by: Huang Rui 

> ---
>  drivers/gpu/drm/scheduler/Makefile|   2 +-
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 441 +---
>  drivers/gpu/drm/scheduler/sched_entity.c  | 459 
> ++
>  include/drm/gpu_scheduler.h   |  28 +-
>  4 files changed, 484 insertions(+), 446 deletions(-)
>  create mode 100644 drivers/gpu/drm/scheduler/sched_entity.c
> 
> diff --git a/drivers/gpu/drm/scheduler/Makefile 
> b/drivers/gpu/drm/scheduler/Makefile
> index 7665883f81d4..f23785d4b3c8 100644
> --- a/drivers/gpu/drm/scheduler/Makefile
> +++ b/drivers/gpu/drm/scheduler/Makefile
> @@ -20,6 +20,6 @@
>  # OTHER DEALINGS IN THE SOFTWARE.
>  #
>  #
> -gpu-sched-y := gpu_scheduler.o sched_fence.o
> +gpu-sched-y := gpu_scheduler.o sched_fence.o sched_entity.o
>  
>  obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 85c1f95752cc..9ca741f3a0bc 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -58,8 +58,6 @@
>  #define to_drm_sched_job(sched_job)  \
>   container_of((sched_job), struct drm_sched_job, queue_node)
>  
> -static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
> -static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>  static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb 
> *cb);
>  
>  /**
> @@ -86,8 +84,8 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler 
> *sched,
>   *
>   * Adds a scheduler entity to the run queue.
>   */
> -static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
> - struct drm_sched_entity *entity)
> +void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
> +  struct drm_sched_entity *entity)
>  {
>   if (!list_empty(&entity->list))
>   return;
> @@ -104,8 +102,8 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq 
> *rq,
>   *
>   * Removes a scheduler entity from the run queue.
>   */
> -static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> -struct drm_sched_entity *entity)
> +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> + struct drm_sched_entity *entity)
>  {
>   if (list_empty(&entity->list))
>   return;
> @@ -158,301 +156,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>   return NULL;
>  }
>  
> -/**
> - * drm_sched_entity_init - Init a context entity used by scheduler when
> - * submit to HW ring.
> - *
> - * @entity: scheduler entity to init
> - * @rq_list: the list of run queue on which jobs from this
> - *   entity can be submitted
> - * @num_rq_list: number of run queue in rq_list
> - * @guilty: atomic_t set to 1 when a job on this queue
> - *  is found to be guilty causing a timeout
> - *
> - * Note: the rq_list should have at least one element to schedule
> - *   the entity
> - *
> - * Returns 0 on success or a negative error code on failure.
> -*/
> -int drm_sched_entity_init(struct drm_sched_entity *entity,
> -   struct drm_sched_rq **rq_list,
> -   unsigned int num_rq_list,
> -   atomic_t *guilty)
> -{
> - int i;
> -
> - if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
> - return -EINVAL;
> -
> - memset(entity, 0, sizeof(struct drm_sched_entity));
> - INIT_LIST_HEAD(&entity->list);
> - entity->rq = rq_list[0];
> - entity->guilty = guilty;
> - entity->num_rq_list = num_rq_list;
> - entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
> - GFP_KERNEL);
> - if (!entity->rq_list)
> - return -ENOMEM;
> -
> - for (i = 0; i < num_rq_list; ++i)
> - entity->rq_list[i] = rq_list[i];
> - entity->last_scheduled = NULL;
> -
> - spin_lock_init(&entity->rq_lock);
> - spsc_queue_init(&entity->job_queue);
> -
> - atomic_set(&entity->fence_seq, 0);
> - entity->fence_context = dma_fence_context_alloc(2);
> -
> - return 0;
> -}
> -EXPORT_SYMBOL(drm_sched_entity_init);
> -
> -/**
> - * drm_sched_entity_is_idle - Check if entity is idle
> - *
> - * @entity: scheduler entity
> - *
> - * Returns true if the entity does not have any unscheduled jobs.
> - */
> -static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
> -{
> - rmb();
> -
> - if (list_empty(&entity->list) ||
> - spsc_queue_peek(&entity->job_queue) == NULL)
> - return true;
> -
> - return false;
> -}
> -
> -/**
> - * drm_sched_entity_is_ready - Check if entity is ready
> - *
> - * @entity: scheduler entity
> - *
> - * Return true if entity 

[PATCH 2/4] drm/scheduler: move entity handling into separate file

2018-08-14 Thread Christian König
This is complex enough on its own. Move it into a separate C file.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/scheduler/Makefile|   2 +-
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 441 +---
 drivers/gpu/drm/scheduler/sched_entity.c  | 459 ++
 include/drm/gpu_scheduler.h   |  28 +-
 4 files changed, 484 insertions(+), 446 deletions(-)
 create mode 100644 drivers/gpu/drm/scheduler/sched_entity.c

diff --git a/drivers/gpu/drm/scheduler/Makefile 
b/drivers/gpu/drm/scheduler/Makefile
index 7665883f81d4..f23785d4b3c8 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,6 +20,6 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 #
-gpu-sched-y := gpu_scheduler.o sched_fence.o
+gpu-sched-y := gpu_scheduler.o sched_fence.o sched_entity.o
 
 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 85c1f95752cc..9ca741f3a0bc 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -58,8 +58,6 @@
 #define to_drm_sched_job(sched_job)\
container_of((sched_job), struct drm_sched_job, queue_node)
 
-static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb 
*cb);
 
 /**
@@ -86,8 +84,8 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
  *
  * Adds a scheduler entity to the run queue.
  */
-static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-   struct drm_sched_entity *entity)
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+struct drm_sched_entity *entity)
 {
	if (!list_empty(&entity->list))
return;
@@ -104,8 +102,8 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
  *
  * Removes a scheduler entity from the run queue.
  */
-static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
-  struct drm_sched_entity *entity)
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+   struct drm_sched_entity *entity)
 {
	if (list_empty(&entity->list))
return;
@@ -158,301 +156,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
return NULL;
 }
 
-/**
- * drm_sched_entity_init - Init a context entity used by scheduler when
- * submit to HW ring.
- *
- * @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
- *   entity can be submitted
- * @num_rq_list: number of run queue in rq_list
- * @guilty: atomic_t set to 1 when a job on this queue
- *  is found to be guilty causing a timeout
- *
 - * Note: the rq_list should have at least one element to schedule
- *   the entity
- *
- * Returns 0 on success or a negative error code on failure.
-*/
-int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
- atomic_t *guilty)
-{
-   int i;
-
-   if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
-   return -EINVAL;
-
-   memset(entity, 0, sizeof(struct drm_sched_entity));
-   INIT_LIST_HEAD(&entity->list);
-   entity->rq = rq_list[0];
-   entity->guilty = guilty;
-   entity->num_rq_list = num_rq_list;
-   entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
-   GFP_KERNEL);
-   if (!entity->rq_list)
-   return -ENOMEM;
-
-   for (i = 0; i < num_rq_list; ++i)
-   entity->rq_list[i] = rq_list[i];
-   entity->last_scheduled = NULL;
-
-   spin_lock_init(&entity->rq_lock);
-   spsc_queue_init(&entity->job_queue);
-
-   atomic_set(&entity->fence_seq, 0);
-   entity->fence_context = dma_fence_context_alloc(2);
-
-   return 0;
-}
-EXPORT_SYMBOL(drm_sched_entity_init);
-
-/**
- * drm_sched_entity_is_idle - Check if entity is idle
- *
- * @entity: scheduler entity
- *
- * Returns true if the entity does not have any unscheduled jobs.
- */
-static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
-{
-   rmb();
-
-   if (list_empty(&entity->list) ||
-   spsc_queue_peek(&entity->job_queue) == NULL)
-   return true;
-
-   return false;
-}
-
-/**
- * drm_sched_entity_is_ready - Check if entity is ready
- *
- * @entity: scheduler entity
- *
- * Return true if entity could provide a job.
- */
-static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
-{
-   if (spsc_queue_peek(&entity->job_queue) == NULL)
-   return false;
-
-   if (READ_ONCE(entity->dependency))
-   return false;
-
-   return true;
-}
-
-/**
- *