Hi,

I was able to reduce the CPU usage of applications that use glFinish by
emitting and waiting for an IRQ in radeonFinish before calling
radeonWaitForIdle. I'm not sure whether radeonWaitForIdle is still
needed after waiting for the IRQ, but keeping it at least does not hurt.
The patch is included below.

One more thing: since radeonWaitForFrameCompletion uses IRQs, there are
no more lag problems. Because the IRQ emit is the last thing on the
ring, all outstanding frames are finished by the time the IRQ is
received. So checking against MAX_OUTSTANDING in a loop is no longer
necessary in that case, right?
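
If that is right, the IRQ path in radeonWaitForFrameCompletion could
collapse to a single wait. A rough sketch of what I mean
(radeonGetLastFrame and radeonWaitIrq are stand-ins for whatever the
driver's helpers are actually called, so treat the names as
assumptions):

static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   if ( rmesa->do_irqs ) {
      /* The IRQ is emitted after the frame's commands, so a single
       * wait covers every outstanding frame -- no MAX_OUTSTANDING
       * loop needed. */
      if ( radeonGetLastFrame( rmesa ) < rmesa->sarea->last_frame )
         radeonWaitIrq( rmesa );
   } else {
      /* Without IRQs, fall back to polling the frame counter. */
      while ( radeonGetLastFrame( rmesa ) < rmesa->sarea->last_frame )
         ; /* busy-wait */
   }
}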

Best regards,
   Felix

               __\|/__    ___     ___     ___
__Tschüß_______\_6 6_/___/__ \___/__ \___/___\___You can do anything,___
_____Felix_______\Ä/\ \_____\ \_____\ \______U___just not everything____
  [EMAIL PROTECTED]    >o<__/   \___/   \___/        at the same time!
Index: radeon_ioctl.c
===================================================================
RCS file: /cvsroot/dri/xc/xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v
retrieving revision 1.24
diff -u -r1.24 radeon_ioctl.c
--- radeon_ioctl.c      25 Sep 2002 17:20:30 -0000      1.24
+++ radeon_ioctl.c      28 Sep 2002 13:32:57 -0000
@@ -1090,6 +1090,34 @@
 {
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeonFlush( ctx );
+   if (rmesa->do_irqs) {
+      int fd = rmesa->dri.fd;
+      int ret;
+
+      drmRadeonIrqEmit ie;
+      drmRadeonIrqWait iw;
+
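+      /* The emit ioctl returns the new sequence number through
+       * ie.irq_seq; pointing it at iw.irq_seq makes the wait below
+       * block on exactly the IRQ emitted here. */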
+      ie.irq_seq = &iw.irq_seq;
+
+      LOCK_HARDWARE( rmesa );
+      ret = drmCommandWriteRead( fd, DRM_RADEON_IRQ_EMIT, &ie, sizeof(ie) );
+      if ( ret ) {
+        fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
+        exit(1);
+      }
+      UNLOCK_HARDWARE( rmesa );
+
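+      /* Sleep in the kernel until that IRQ arrives instead of
+       * busy-waiting for engine idle. */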
+      ret = drmCommandWrite( fd, DRM_RADEON_IRQ_WAIT, &iw, sizeof(iw) );
+      if ( ret ) {
+        fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
+        exit(1);
+      }
+   }
    radeonWaitForIdle( rmesa );
 }
 

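For reference, the ioctl plumbing the patch relies on: the emit ioctl
returns the new IRQ sequence number through ie.irq_seq, which points at
iw.irq_seq, so the wait ioctl then blocks on exactly that sequence
number. From memory the structures look roughly like this (the
definitions in the radeon DRM headers are authoritative):

typedef struct {
   int *irq_seq;        /* out: sequence number of the emitted IRQ */
} drmRadeonIrqEmit;

typedef struct {
   int irq_seq;         /* in: sequence number to wait for */
} drmRadeonIrqWait;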