Hi,

> In the slow path of a spinlock_acquire they busy wait for a few
> cycles, and then call schedule with a zero timeout assuming that
> it'll basically do the same as a sched_yield() but more portably.

The obvious problem with this is that we bounce in and out of schedule()
a few times before actually moving on to the next task. I see the same
behaviour with sched_yield().

I had this patch lying around; I think it came about when I was playing
with pthreads (whose spinlocks do sched_yield() for a while before
sleeping). For reference, that spin-then-yield style looks something
like the sketch below.

--- linux/kernel/sched.c        Fri Mar  9 10:26:56 2001
+++ linux_intel/kernel/sched.c  Fri Mar  9 08:42:39 2001
@@ -505,6 +505,9 @@
                goto out_unlock;
        }
 #else
+       if (prev->policy & SCHED_YIELD)
+               prev->counter = (prev->counter >> 4);
+
        prev->policy &= ~SCHED_YIELD;
 #endif /* CONFIG_SMP */
 }
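
To see why this helps: in 2.4, schedule() picks the runnable task with
the highest goodness(), and for SCHED_OTHER tasks that is dominated by
the remaining counter. Here is a toy model of the pick (runnable, but
with made-up counter values; the real goodness() also adds priority,
realtime and mm-affinity bonuses):

#include <stdio.h>

struct task {
        const char *name;
        int counter;            /* remaining timeslice, in ticks */
};

/* highest counter wins, as in the 2.4 pick loop */
static struct task *pick(struct task *tasks, int n)
{
        struct task *next = NULL;
        int c = -1000, i;

        for (i = 0; i < n; i++) {
                if (tasks[i].counter > c) {
                        c = tasks[i].counter;
                        next = &tasks[i];
                }
        }
        return next;
}

int main(void)
{
        struct task tasks[] = { { "yielder", 40 }, { "other", 10 } };

        printf("before yield: %s\n", pick(tasks, 2)->name);

        tasks[0].counter >>= 4; /* the patch: keep only 1/16th */

        printf("after yield:  %s\n", pick(tasks, 2)->name);
        return 0;
}

With the patch the yielder drops from 40 to 2 ticks and loses the next
pick to the other task; without it, it would win again immediately.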

Anton


/*
 * test sched_yield: fork ten children that each print their id, burn
 * a little CPU and yield, either via sched_yield() or (with USE_SELECT
 * defined) via a zero-timeout select().
 */

#include <stdio.h>
#include <sched.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

/* define USE_SELECT to yield via a zero-timeout select() instead */
#undef USE_SELECT

/* burn a few cycles; volatile keeps the compiler from optimising
 * the empty loop away */
void waste_time(void)
{
        volatile int i;

        for(i = 0; i < 10000; i++)
                ;
}

void do_stuff(int i)
{
#ifdef USE_SELECT
        struct timeval tv;
#endif

        while(1) {
                fprintf(stderr, "%d\n", i);
                waste_time();
#ifdef USE_SELECT
                /* zero-timeout select(): the "portable yield" from
                 * the quoted mail */
                tv.tv_sec = 0;
                tv.tv_usec = 0;
                select(0, NULL, NULL, NULL, &tv);
#else
                sched_yield();
#endif
        }
}

int main(void)
{
        int i, pid;

        for(i = 0; i < 10; i++) {
                pid = fork();

                if (!pid)
                        do_stuff(i);    /* child: never returns */
        }

        do_stuff(i+1);          /* parent joins in with its own id */

        return 0;
}