On Wed, 2008-01-09 at 18:29 -0500, Steven Rostedt wrote:
> plain text document attachment (rt-time-starvation-fix.patch)
> Handle accurate time even if there's a long delay between
> accumulated clock cycles.
> 
> Signed-off-by: John Stultz <[EMAIL PROTECTED]>
> Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>

Hrmm.. One more item I just noticed.

> Index: linux-compile-i386.git/kernel/time/timekeeping.c
> ===================================================================
> --- linux-compile-i386.git.orig/kernel/time/timekeeping.c	2008-01-09 14:07:34.000000000 -0500
> +++ linux-compile-i386.git/kernel/time/timekeeping.c	2008-01-09 15:17:31.000000000 -0500
> @@ -448,27 +449,29 @@ static void clocksource_adjust(s64 offse
>   */
>  void update_wall_time(void)
>  {
> -     cycle_t offset;
> +     cycle_t cycle_now, offset;
> 
>       /* Make sure we're fully resumed: */
>       if (unlikely(timekeeping_suspended))
>               return;
> 
>  #ifdef CONFIG_GENERIC_TIME
> -     offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
> +     cycle_now = clocksource_read(clock);
>  #else
> -     offset = clock->cycle_interval;
> +     cycle_now = clock->cycle_last + clock->cycle_interval;
>  #endif
> +     offset = (cycle_now - clock->cycle_last) & clock->mask;

It seems this offset calculation was added to merge against the colliding
xtime_cache changes in mainline. However, I don't think it's quite right,
and it might be causing incorrect time() or vtime() results if NO_HZ is
enabled.
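
To illustrate what I mean, here is a standalone sketch (not the kernel
code; the struct and accumulate() helper below are just stand-ins, and I'm
assuming clocksource_accumulate() folds the delta since cycle_last into
cycle_accumulated and advances cycle_last, with mask handling dropped for
brevity). After a long NO_HZ gap the accumulation loop folds whole
intervals into xtime, but the "offset" computed above still holds the full
delta, so handing it to update_xtime_cache() counts those intervals twice:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cycle_t;

/* Hypothetical stand-in for the clocksource fields involved. */
struct clk {
	cycle_t cycle_last;
	cycle_t cycle_accumulated;
	cycle_t cycle_interval;
};

/* Assumed behaviour of clocksource_accumulate(): fold the delta since
 * cycle_last into cycle_accumulated and advance cycle_last. */
static void accumulate(struct clk *c, cycle_t now)
{
	c->cycle_accumulated += now - c->cycle_last;
	c->cycle_last = now;
}

int main(void)
{
	struct clk c = { .cycle_last = 0, .cycle_accumulated = 0,
			 .cycle_interval = 1000 };
	cycle_t now = 10500;	/* long NO_HZ gap: 10.5 intervals */

	/* What the merged update_wall_time() computes up front. */
	cycle_t offset = now - c.cycle_last;

	accumulate(&c, now);

	/* The accumulation loop drains cycle_accumulated into xtime. */
	while (c.cycle_accumulated >= c.cycle_interval)
		c.cycle_accumulated -= c.cycle_interval;

	printf("offset handed to update_xtime_cache(): %llu\n",
	       (unsigned long long)offset);			/* 10500 */
	printf("cycles actually not yet in xtime:      %llu\n",
	       (unsigned long long)c.cycle_accumulated);	/* 500 */
	return 0;
}

So xtime_cache would jump ahead of xtime by the ten intervals that were
just accumulated; using clock->cycle_accumulated after the loop (as in the
patch below) gives only the leftover that the old decremented offset used
to carry.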

> +     clocksource_accumulate(clock, cycle_now);
> +
>       clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
> 
>       /* normally this loop will run just once, however in the
>        * case of lost or late ticks, it will accumulate correctly.
>        */
> -     while (offset >= clock->cycle_interval) {
> +     while (clock->cycle_accumulated >= clock->cycle_interval) {
>               /* accumulate one interval */
>               clock->xtime_nsec += clock->xtime_interval;
> -             clock->cycle_last += clock->cycle_interval;
> -             offset -= clock->cycle_interval;
> +             clock->cycle_accumulated -= clock->cycle_interval;
> 
>               if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
>                       clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
> @@ -482,7 +485,7 @@ void update_wall_time(void)
>       }
> 
>       /* correct the clock when NTP error is too big */
> -     clocksource_adjust(offset);
> +     clocksource_adjust(clock->cycle_accumulated);


I suspect the following is needed, but haven't been able to test it yet.

thanks
-john


Fixup merge between xtime_cache and timekeeping starvation fix.

Signed-off-by: John Stultz <[EMAIL PROTECTED]>

Index: 2.6/kernel/time/timekeeping.c
===================================================================
--- 2.6.orig/kernel/time/timekeeping.c  2008-01-09 16:12:49.000000000 -0800
+++ 2.6/kernel/time/timekeeping.c       2008-01-09 16:13:18.000000000 -0800
@@ -449,7 +449,7 @@ static void clocksource_adjust(s64 offse
  */
 void update_wall_time(void)
 {
-       cycle_t cycle_now, offset;
+       cycle_t cycle_now;
 
        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
@@ -460,7 +460,6 @@ void update_wall_time(void)
 #else
        cycle_now = clock->cycle_last + clock->cycle_interval;
 #endif
-       offset = (cycle_now - clock->cycle_last) & clock->mask;
        clocksource_accumulate(clock, cycle_now);
 
        clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
@@ -491,7 +490,7 @@ void update_wall_time(void)
        xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 
-       update_xtime_cache(cyc2ns(clock, offset));
+       update_xtime_cache(cyc2ns(clock, clock->cycle_accumulated));
 
        /* check to see if there is a new clocksource to use */
        change_clocksource();

