RE: [PATCH v2 1/5] drm/amdgpu: reverts commit b01245ff54db66073b104ac9d9fbefb7b264b36d.

2019-12-17 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]


Hi Andrey,



Please check the 3 minor comments in this patch. With those addressed, the v2
series is Reviewed-by: Le Ma <le...@amd.com>



Regards,

Ma Le



-Original Message-
From: Andrey Grodzovsky 
Sent: Saturday, December 14, 2019 12:54 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [PATCH v2 1/5] drm/amdgpu: reverts commit 
b01245ff54db66073b104ac9d9fbefb7b264b36d.



In preparation for doing XGMI reset synchronization using task barrier.



Signed-off-by: Andrey Grodzovsky <andrey.grodzov...@amd.com>

Reviewed-by: Le Ma <le...@amd.com>

---

drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 -

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 76 +-

2 files changed, 12 insertions(+), 66 deletions(-)



diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index a78a363..50bab33 100644

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h

+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1001,8 +1001,6 @@ struct amdgpu_device {



boolpm_sysfs_en;

   boolucode_sysfs_en;

-

-   boolin_baco;

};



 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device 
*bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 7324a5f..1d19edfa 100644

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2667,7 +2667,7 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)

   if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)

   adev->asic_reset_res = (adev->in_baco == false) ?

   
amdgpu_device_baco_enter(adev->ddev) :

-   
amdgpu_device_baco_exit(adev->ddev);

+  
qamdgpu_device_baco_exit(adev->ddev);

[Le] 1/3: Still an unnecessary typo here ("qamdgpu_"), although it will be removed in patch #4.

   else

   adev->asic_reset_res = amdgpu_asic_reset(adev);



@@ -3796,18 +3796,13 @@ static int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

   return r;

}



-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,

-  struct amdgpu_hive_info *hive,

+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,

  struct list_head *device_list_handle,

  bool *need_full_reset_arg)

{

   struct amdgpu_device *tmp_adev = NULL;

   bool need_full_reset = *need_full_reset_arg, vram_lost = false;

   int r = 0;

-   int cpu = smp_processor_id();

-   bool use_baco =

-   (amdgpu_asic_reset_method(adev) == 
AMD_RESET_METHOD_BACO) ?

-   true : false;



/*

* ASIC reset has to be done on all HGMI hive nodes ASAP @@ -3815,62 
+3810,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,

*/

   if (need_full_reset) {

   list_for_each_entry(tmp_adev, device_list_handle, 
gmc.xgmi.head) {

-   /*

-   * For XGMI run all resets in parallel to 
speed up the

-   * process by scheduling the highpri wq on 
different

-   * cpus. For XGMI with baco reset, all nodes 
must enter

-   * baco within close proximity before anyone 
exit.

-   */

+  /* For XGMI run all resets in parallel to 
speed up the process */

   if (tmp_adev->gmc.xgmi.num_physical_nodes > 
1) {

-				if (!queue_work_on(cpu, system_highpri_wq,
-						   &tmp_adev->xgmi_reset_work))

+				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))

   r = -EALREADY;

-   cpu = cpumask_next(cpu, 
cpu_online_mask);

   } else

   r = amdgpu_asic_reset(tmp_adev);

-   if (r)

-   break;

-   }

-

-   /* For XGMI wait for all work to complete be

RE: [RESEND PATCH 5/5] drm/amdgpu: Switch from system_highpri_wq to system_unbound_wq

2019-12-11 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Le Ma 

Regards,
Ma Le

-Original Message-
From: Andrey Grodzovsky  
Sent: Thursday, December 12, 2019 4:39 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [RESEND PATCH 5/5] drm/amdgpu: Switch from system_highpri_wq to 
system_unbound_wq

This is to avoid queueing jobs to the same CPU during XGMI hive reset because
there is a strict timeline for when the reset commands must reach all the GPUs
in the hive.
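
For illustration, a minimal self-contained sketch of the behaviour being relied on
here (a hypothetical standalone demo module, not amdgpu code): work items queued on
system_unbound_wq carry no CPU binding, so independent items can be spread across
CPUs instead of all being queued to the same one.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

#define NUM_ITEMS 4

static struct work_struct items[NUM_ITEMS];

static void item_func(struct work_struct *work)
{
	/* Each item reports where it ended up running. */
	pr_info("item %d running on CPU %d\n",
		(int)(work - items), raw_smp_processor_id());
}

static int __init unbound_demo_init(void)
{
	int i;

	for (i = 0; i < NUM_ITEMS; i++) {
		INIT_WORK(&items[i], item_func);
		/* Unbound workqueue: the scheduler picks a CPU per item. */
		queue_work(system_unbound_wq, &items[i]);
	}
	return 0;
}

static void __exit unbound_demo_exit(void)
{
	int i;

	for (i = 0; i < NUM_ITEMS; i++)
		flush_work(&items[i]);
}

module_init(unbound_demo_init);
module_exit(unbound_demo_exit);
MODULE_LICENSE("GPL");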

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e4089a0..1518565 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3842,7 +3842,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info 
*hive,
list_for_each_entry(tmp_adev, device_list_handle, 
gmc.xgmi.head) {
/* For XGMI run all resets in parallel to speed up the 
process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-			if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+			if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
r = amdgpu_asic_reset(tmp_adev);
--
2.7.4
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


RE: [RESEND PATCH 4/5] Subject: drm/amdgpu: Redo XGMI reset synchronization.

2019-12-11 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]






-Original Message-
From: Andrey Grodzovsky 
Sent: Thursday, December 12, 2019 4:39 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [RESEND PATCH 4/5] Subject: drm/amdgpu: Redo XGMI reset 
synchronization.



Use a task barrier in the XGMI hive to synchronize ASIC resets across all the
devices in the hive.



Signed-off-by: Andrey Grodzovsky <andrey.grodzov...@amd.com>

---

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 42 +-

1 file changed, 36 insertions(+), 6 deletions(-)



diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 1d19edfa..e4089a0 100644

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -67,6 +67,7 @@

#include "amdgpu_tmz.h"



 #include 

+#include 



 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");

MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");

@@ -2663,14 +2664,43 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)  {

   struct amdgpu_device *adev =

   container_of(__work, struct amdgpu_device, 
xgmi_reset_work);

+  struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);



-   if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)

-   adev->asic_reset_res = (adev->in_baco == false) ?

-   
amdgpu_device_baco_enter(adev->ddev) :

-   
qamdgpu_device_baco_exit(adev->ddev);

-   else

-   adev->asic_reset_res = amdgpu_asic_reset(adev);

+  /*

+  * Use task barrier to synchronize all xgmi reset works across the

+  * hive.

+  * task_barrier_enter and task_barrier_exit will block untill all the

+  * threads running the xgmi reset works reach those points. I assume

+  * guarantee of progress here for all the threads as the workqueue 
code

+  * creates new worker threads as needed by amount of work items in 
queue

+  * (see worker_thread) and also each thread sleeps in the barrir and 
by

+  * this yielding the CPU for other work threads to make progress.

+  */

[Le]: This comment can be adjusted since we switch to system_unbound_wq in
patch #5.

+  if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

+

+  if (hive)

+		task_barrier_enter(&hive->tb);

[Le]: The hive condition is checked multiple times; it can be checked only
once, at the location right after the assignment.

+

+  adev->asic_reset_res = 
amdgpu_device_baco_enter(adev->ddev);

+

+  if (adev->asic_reset_res)

+  goto fail;

+

+  if (hive)

+		task_barrier_exit(&hive->tb);

[Le]: Same as above.

+

+  adev->asic_reset_res = 
amdgpu_device_baco_exit(adev->ddev);

+

+  if (adev->asic_reset_res)

+  goto fail;

+  } else {

+  if (hive)

+		task_barrier_full(&hive->tb);

[Le]: Same as above.



With the above addressed, Reviewed-by: Le Ma <le...@amd.com>



Regards,

Ma Le

+

+  adev->asic_reset_res =  amdgpu_asic_reset(adev);

+  }



+fail:

   if (adev->asic_reset_res)

   DRM_WARN("ASIC reset failed with error, %d for drm dev, 
%s",

adev->asic_reset_res, adev->ddev->unique);

--

2.7.4


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


RE: [RESEND PATCH 3/5] drm/amdgpu: Add task barrier to XGMI hive.

2019-12-11 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Le Ma 

Regards,
Ma Le

-Original Message-
From: Andrey Grodzovsky  
Sent: Thursday, December 12, 2019 4:39 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [RESEND PATCH 3/5] drm/amdgpu: Add task barrier to XGMI hive.

Signed-off-by: Andrey Grodzovsky 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 4 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8..5cf920d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
 	INIT_LIST_HEAD(&hive->device_list);
 	mutex_init(&hive->hive_lock);
 	mutex_init(&hive->reset_lock);
+	task_barrier_init(&hive->tb);
 
 	if (lock)
 		mutex_lock(&hive->hive_lock);
@@ -408,6 +409,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	top_info->num_nodes = count;
 	hive->number_devices = count;
 
+	task_barrier_add_task(&hive->tb);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			/* update node list for other device in the hive */
@@ -470,6 +473,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_destroy(&hive->hive_lock);
 		mutex_destroy(&hive->reset_lock);
 	} else {
+		task_barrier_rem_task(&hive->tb);
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504f..74011fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
 #ifndef __AMDGPU_XGMI_H__
 #define __AMDGPU_XGMI_H__
 
+#include 
 #include "amdgpu_psp.h"
 
 struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
struct device_attribute dev_attr;
struct amdgpu_device *adev;
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+   struct task_barrier tb;
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int 
lock);
--
2.7.4
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


RE: [RESEND PATCH 1/5] drm/amdgpu: reverts commit b01245ff54db66073b104ac9d9fbefb7b264b36d.

2019-12-11 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]




-Original Message-
From: Andrey Grodzovsky 
Sent: Thursday, December 12, 2019 4:39 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [RESEND PATCH 1/5] drm/amdgpu: reverts commit 
b01245ff54db66073b104ac9d9fbefb7b264b36d.



In preparation for doing XGMI reset synchronization using task barrier.



Signed-off-by: Andrey Grodzovsky 

---

drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 -

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 76 +-

2 files changed, 12 insertions(+), 66 deletions(-)



diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

index a78a363..50bab33 100644

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h

+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1001,8 +1001,6 @@ struct amdgpu_device {



boolpm_sysfs_en;

   boolucode_sysfs_en;

-

-   boolin_baco;

};



 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device 
*bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

index 7324a5f..1d19edfa 100644

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2667,7 +2667,7 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)

   if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)

   adev->asic_reset_res = (adev->in_baco == false) ?

   
amdgpu_device_baco_enter(adev->ddev) :

-   
amdgpu_device_baco_exit(adev->ddev);

+  
qamdgpu_device_baco_exit(adev->ddev);

[Le]: Typo here. With it fixed, Reviewed-by: Le Ma <le...@amd.com>



Regards,

Ma Le

   else

   adev->asic_reset_res = amdgpu_asic_reset(adev);



@@ -3796,18 +3796,13 @@ static int amdgpu_device_pre_asic_reset(struct 
amdgpu_device *adev,

   return r;

}



-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,

-  struct amdgpu_hive_info *hive,

+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,

  struct list_head *device_list_handle,

  bool *need_full_reset_arg)

{

   struct amdgpu_device *tmp_adev = NULL;

   bool need_full_reset = *need_full_reset_arg, vram_lost = false;

   int r = 0;

-   int cpu = smp_processor_id();

-   bool use_baco =

-   (amdgpu_asic_reset_method(adev) == 
AMD_RESET_METHOD_BACO) ?

-   true : false;



/*

* ASIC reset has to be done on all HGMI hive nodes ASAP @@ -3815,62 
+3810,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,

*/

   if (need_full_reset) {

   list_for_each_entry(tmp_adev, device_list_handle, 
gmc.xgmi.head) {

-   /*

-   * For XGMI run all resets in parallel to 
speed up the

-   * process by scheduling the highpri wq on 
different

-   * cpus. For XGMI with baco reset, all nodes 
must enter

-   * baco within close proximity before anyone 
exit.

-   */

+  /* For XGMI run all resets in parallel to 
speed up the process */

   if (tmp_adev->gmc.xgmi.num_physical_nodes > 
1) {

-				if (!queue_work_on(cpu, system_highpri_wq,
-						   &tmp_adev->xgmi_reset_work))

+				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))

   r = -EALREADY;

-   cpu = cpumask_next(cpu, 
cpu_online_mask);

   } else

   r = amdgpu_asic_reset(tmp_adev);

-   if (r)

-   break;

-   }

-

-   /* For XGMI wait for all work to complete before 
proceed */

-   if (!r) {

-   list_for_each_entry(tmp_adev, 
device_list_handle,

-

RE: [RESEND PATCH 2/5] drm: Add Reusable task barrier.

2019-12-11 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]






-Original Message-
From: Andrey Grodzovsky 
Sent: Thursday, December 12, 2019 4:39 AM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org
Cc: Deucher, Alexander ; Ma, Le ; 
Zhang, Hawking ; Quan, Evan ; 
Grodzovsky, Andrey 
Subject: [RESEND PATCH 2/5] drm: Add Reusable task barrier.



It is used to synchronize N threads at a rendezvous point before execution of
critical code that has to be started by all the threads at approximately the
same time.
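
For reference, a minimal usage sketch of the API added below (fragment only; the
calling context here is hypothetical, the real caller is wired up in patch 4/5):

	struct task_barrier tb;

	/* setup, e.g. as participants join the group */
	task_barrier_init(&tb);
	task_barrier_add_task(&tb);	/* once per participating thread */

	/* in each participating thread */
	task_barrier_enter(&tb);	/* blocks until all N threads arrive */
	/* critical code starts on all threads at roughly the same time */
	task_barrier_exit(&tb);		/* blocks again so no thread can lap
					 * the others when used in a loop */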



Signed-off-by: Andrey Grodzovsky <andrey.grodzov...@amd.com>

---

include/drm/task_barrier.h | 106 +

1 file changed, 106 insertions(+)

create mode 100644 include/drm/task_barrier.h



diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 000..81fb0f7

--- /dev/null

+++ b/include/drm/task_barrier.h

@@ -0,0 +1,106 @@

+/*

+ * Copyright 2019 Advanced Micro Devices, Inc.

+ *

+ * Permission is hereby granted, free of charge, to any person

+obtaining a

+ * copy of this software and associated documentation files (the

+"Software"),

+ * to deal in the Software without restriction, including without

+limitation

+ * the rights to use, copy, modify, merge, publish, distribute,

+sublicense,

+ * and/or sell copies of the Software, and to permit persons to whom

+the

+ * Software is furnished to do so, subject to the following conditions:

+ *

+ * The above copyright notice and this permission notice shall be

+included in

+ * all copies or substantial portions of the Software.

+ *

+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,

+EXPRESS OR

+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF

+MERCHANTABILITY,

+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT

+SHALL

+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM,

+DAMAGES OR

+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR

+OTHERWISE,

+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE

+OR

+ * OTHER DEALINGS IN THE SOFTWARE.

+ *

+ */

+#include 

+#include 

+

+/*

+ * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.

+ * Based on the Little book of sempahores -

+https://greenteapress.com/wp/semaphores/

+ */

+

+

+

+#ifndef DRM_TASK_BARRIER_H_

+#define DRM_TASK_BARRIER_H_

+



[Le]: It might be better to prefix "drm_" to the functions and structure below,
and even to this header file name.



+/*

+ * Represents an instance of a task barrier.

+ */

+struct task_barrier {

+  unsigned int n;

[Le]: We can define it as a signed type here for more general use.

+  atomic_t count;

+  struct semaphore enter_turnstile;

+  struct semaphore exit_turnstile;

+};

+

+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+						 unsigned int n)

+{

+  int i;

+

+  for (i = 0 ; i < n; i++)

+  up(turnstile);

+}

+

+static inline void task_barrier_init(struct task_barrier *tb) {

+  tb->n = 0;

+	atomic_set(&tb->count, 0);

+	sema_init(&tb->enter_turnstile, 0);

+	sema_init(&tb->exit_turnstile, 0);

+}

+

+static inline void task_barrier_add_task(struct task_barrier *tb) {

+  tb->n++;

+}

+

+static inline void task_barrier_rem_task(struct task_barrier *tb) {

+  tb->n--;

+}

+

+/*

+ * Lines up all the threads BEFORE the critical point.

+ *

+ * When all thread passed this code the entry barrier is back to locked state.

+ */

+static inline void task_barrier_enter(struct task_barrier *tb) {

+	if (atomic_inc_return(&tb->count) == tb->n)

+		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);

+

+	down(&tb->enter_turnstile);

+}

+

+/*

+ * Lines up all the threads AFTER the critical point.

+ *

+ * This function is used to avoid any one thread running ahead of the

+reset if

[Le]: No need to mention "reset" here.



With the above addressed, Acked-by: Le Ma <le...@amd.com>



Regards,

Ma Le

+ * the barrier is used in a loop (repeatedly) .

+ */

+static inline void task_barrier_exit(struct task_barrier *tb) {

+	if (atomic_dec_return(&tb->count) == 0)

+		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);

+

+	down(&tb->exit_turnstile);

+}

+

+static inline void task_barrier_full(struct task_barrier *tb) {

+  task_barrier_enter(tb);

+  task_barrier_exit(tb);

+}

+

+#endif
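
As a side note for readers, below is a self-contained userspace analogue of the same
two-phase turnstile barrier, using C11 atomics and POSIX semaphores as stand-ins for
the kernel's atomic_t and struct semaphore. It is purely illustrative and not part of
the patch; names and thread count are arbitrary.

/* build: cc -pthread barrier_demo.c */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define N 4

static int n = N;                       /* number of participating threads */
static atomic_int count;                /* mirrors task_barrier.count      */
static sem_t enter_turnstile, exit_turnstile;

static void signal_turnstile(sem_t *turnstile, int times)
{
	for (int i = 0; i < times; i++)
		sem_post(turnstile);
}

static void barrier_enter(void)
{
	/* last thread to arrive opens the entry turnstile for everyone */
	if (atomic_fetch_add(&count, 1) + 1 == n)
		signal_turnstile(&enter_turnstile, n);
	sem_wait(&enter_turnstile);
}

static void barrier_exit(void)
{
	/* last thread to leave opens the exit turnstile for everyone */
	if (atomic_fetch_sub(&count, 1) - 1 == 0)
		signal_turnstile(&exit_turnstile, n);
	sem_wait(&exit_turnstile);
}

static void *worker(void *arg)
{
	barrier_enter();        /* all N threads line up here ...           */
	printf("thread %ld in critical section\n", (long)arg);
	barrier_exit();         /* ... and here, so the barrier is reusable */
	return NULL;
}

int main(void)
{
	pthread_t threads[N];

	sem_init(&enter_turnstile, 0, 0);
	sem_init(&exit_turnstile, 0, 0);
	for (long i = 0; i < N; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (int i = 0; i < N; i++)
		pthread_join(threads[i], NULL);
	return 0;
}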

--

2.7.4


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel