Given the following code (timertest.c):
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Start one EVFILT_TIMER for the number of seconds given on the command
 * line, block until it fires, and report how far the observed elapsed
 * time drifted from the requested period.
 *
 * Returns 0 on success; exits non-zero on usage or syscall errors.
 */
int
main(int argc, char *argv[])
{
	long long seconds, timeout, elapsed;
	int kqfd;
	struct timespec ts0, ts;
	struct kevent kev;
	char *end;

	if (argc < 2)
		errx(1, "missing number of seconds");

	/*
	 * atoll() gives no error indication and is not required to set
	 * errno, so the old "if (errno)" test read an indeterminate
	 * value.  strtoll() reports range errors via errno and rejects
	 * trailing junk via the end pointer.
	 */
	errno = 0;
	seconds = strtoll(argv[1], &end, 10);
	if (errno != 0 || end == argv[1] || *end != '\0' ||
	    seconds <= 0 || seconds > LLONG_MAX / 1000)
		errx(1, "invalid number of seconds");
	timeout = seconds * 1000;	/* EVFILT_TIMER default unit: msec */

	if ((kqfd = kqueue()) == -1)
		err(1, "kqueue");
	EV_SET(&kev, 9999, EVFILT_TIMER, EV_ADD, 0, timeout, NULL);

	/*
	 * Use the monotonic clock to measure the interval: CLOCK_REALTIME
	 * can be stepped by ntpd/settimeofday, which would show up here
	 * as bogus "drift".
	 */
	if (clock_gettime(CLOCK_MONOTONIC, &ts0) == -1)
		err(1, "clock_gettime");
	if (kevent(kqfd, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
	printf("timer [%lld msec] started ... ", timeout);
	fflush(stdout);

	memset(&kev, 0, sizeof(kev));
	if (kevent(kqfd, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent");
	/* kev.data is int64_t (signed); %llu was a format mismatch. */
	if (kev.flags & EV_ERROR)
		errx(1, "event error (%lld)", (long long)kev.data);

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
		err(1, "clock_gettime");
	timespecsub(&ts, &ts0, &ts);	/* BSD macro: ts = ts - ts0 */
	elapsed = ts.tv_sec * 1000 + ts.tv_nsec / 1000000L;

	printf("done\ntime elapsed: %lld msec\n>>> drift: %lld msec\n",
	    elapsed, elapsed - timeout);
	return (0);
}
# cc timertest.c -o timertest
On an older machine, there is a noticeable clock drift:
# ./timertest 600
timer [600000 msec] started ... done
time elapsed: 624008 msec
>>> drift: 24008 msec
On an idle VM, the drift is below noise...
# ./timertest 600
timer [600000 msec] started ... done
time elapsed: 600008 msec
>>> drift: 8 msec
... the same VM under 50% CPU load:
# ./timertest 600
timer [600000 msec] started ... done
time elapsed: 609704 msec
>>> drift: 9704 msec
Is it the way I'm using kevent/EVFILT_TIMER?
Any help is appreciated.