As discussed back in 2003, sched_yield() on Linux is no longer
equivalent to other POSIX variations. From an LWN article: "This call
used to simply move the process to the end of the run queue; now it
moves the process to the "expired" queue, effectively cancelling the
rest of the process's time slice. So a process calling sched_yield() now
must wait until all other runnable processes in the system have used up
their time slices before it will get the processor again."

However, its use on Linux has sneaked back in, causing suboptimal
performance such as reported by Simon Farnsworth on r600g. Use sleep on
Linux instead.

Signed-off-by: Alan Swanson <swan...@ukfsn.org>
diff -ur mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c mesa-killschedyield/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
--- mesa/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c	2011-09-19 11:53:16.000000000 +0100
+++ mesa-killschedyield/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c	2012-01-31 17:24:46.000000000 +0000
@@ -45,6 +45,7 @@
 #include "pipe/p_defines.h"
 #include "util/u_debug.h"
 #include "os/os_thread.h"
+#include "os/os_time.h"
 #include "util/u_memory.h"
 #include "util/u_double_list.h"
 
@@ -1009,8 +1010,10 @@
    /* Wait on outstanding fences */
    while (fenced_mgr->num_fenced) {
       pipe_mutex_unlock(fenced_mgr->mutex);
-#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+#if defined(PIPE_OS_UNIX) && !defined(PIPE_OS_LINUX)
       sched_yield();
+#else
+      os_time_sleep(10);
 #endif
       pipe_mutex_lock(fenced_mgr->mutex);
       while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
diff -ur mesa/src/gallium/drivers/nouveau/nouveau_fence.c mesa-killschedyield/src/gallium/drivers/nouveau/nouveau_fence.c
--- mesa/src/gallium/drivers/nouveau/nouveau_fence.c	2011-10-15 13:43:24.000000000 +0100
+++ mesa-killschedyield/src/gallium/drivers/nouveau/nouveau_fence.c	2012-01-31 17:28:09.000000000 +0000
@@ -21,6 +21,7 @@
  */
 
 #include "util/u_double_list.h"
 +#include "os/os_time.h"
 
 #include "nouveau_screen.h"
 #include "nouveau_fence.h"
@@ -207,7 +208,11 @@
       spins++;
 #ifdef PIPE_OS_UNIX
       if (!(spins % 8)) /* donate a few cycles */
+#ifndef PIPE_OS_LINUX
          sched_yield();
+#else
+         os_time_sleep(10);
+#endif
 #endif
    } while (spins < NOUVEAU_FENCE_MAX_SPINS);
 
diff -ur mesa/src/gallium/drivers/r600/r600_pipe.c mesa-killschedyield/src/gallium/drivers/r600/r600_pipe.c
--- mesa/src/gallium/drivers/r600/r600_pipe.c	2012-01-23 18:43:17.000000000 +0000
+++ mesa-killschedyield/src/gallium/drivers/r600/r600_pipe.c	2012-01-31 16:32:24.000000000 +0000
@@ -605,7 +605,7 @@
 	while (rscreen->fences.data[rfence->index] == 0) {
 		if (++spins % 256)
 			continue;
-#ifdef PIPE_OS_UNIX
+#if defined(PIPE_OS_UNIX) && !defined(PIPE_OS_LINUX)
 		sched_yield();
 #else
 		os_time_sleep(10);
diff -ur mesa/src/gallium/winsys/radeon/drm/radeon_drm_bo.c mesa-killschedyield/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
--- mesa/src/gallium/winsys/radeon/drm/radeon_drm_bo.c	2012-01-23 18:43:17.000000000 +0000
+++ mesa-killschedyield/src/gallium/winsys/radeon/drm/radeon_drm_bo.c	2012-01-31 17:26:23.000000000 +0000
@@ -33,6 +33,7 @@
 #include "util/u_double_list.h"
 #include "os/os_thread.h"
 #include "os/os_mman.h"
+#include "os/os_time.h"
 
 #include "state_tracker/drm_driver.h"
 
@@ -152,7 +153,11 @@
     struct radeon_bo *bo = get_radeon_bo(_buf);
 
     while (p_atomic_read(&bo->num_active_ioctls)) {
+#if defined(PIPE_OS_UNIX) && !defined(PIPE_OS_LINUX)
         sched_yield();
+#else
 +        os_time_sleep(10);
+#endif
     }
 
     /* XXX use this when it's ready */
@@ -664,7 +669,11 @@
     }
 
     while (p_atomic_read(&bo->num_active_ioctls)) {
+#if defined(PIPE_OS_UNIX) && !defined(PIPE_OS_LINUX)
         sched_yield();
+#else
+        os_time_sleep(10);
+#endif
     }
 
     if (microtiled == RADEON_LAYOUT_TILED)
_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to