TIMER_MILLISECOND is defined as the number of CPU cycles per millisecond.
The current definition is correct only for cores running at 2 GHz; on cores
with a different frequency it causes a different period between statistics
refreshes (e.g. the defined value corresponds to about 14 ms on ARM cores).
Use the DPDK API to get the CPU frequency, and use it to define
TIMER_MILLISECOND.

Fixes: af75078fece3 ("first public release")
Cc: sta...@dpdk.org

Signed-off-by: Raja Zidane <rzid...@nvidia.com>
Acked-by: Matan Azrad <ma...@nvidia.com>
---
 examples/link_status_interrupt/main.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 551f0524da..eb7a74d37c 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -101,9 +101,10 @@ struct lsi_port_statistics {
 struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
+#define TIMER_MILLISECOND (rte_get_tsc_hz() / 1000)
 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+#define DEFAULT_TIMER_PERIOD 10UL /* default period is 10 seconds */
+static int64_t timer_period;
 
 /* Print out statistics on packets dropped */
 static void
@@ -371,6 +372,7 @@ lsi_parse_args(int argc, char **argv)
 	};
 
 	argvopt = argv;
+	timer_period = DEFAULT_TIMER_PERIOD;
 
 	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
 				  lgopts, &option_index)) != EOF) {
-- 
2.21.0
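
For reference, below is a minimal sketch (not part of the patch) of how the
reworked macros turn a period in seconds into TSC cycles at runtime.
rte_get_tsc_hz() and rte_rdtsc() are existing DPDK APIs from <rte_cycles.h>;
the stats_timer_loop() helper is purely illustrative:

    #include <stdint.h>
    #include <rte_cycles.h>

    /* Cycles per millisecond, derived from the measured TSC frequency.
     * Note: rte_get_tsc_hz() returns a meaningful value only after
     * rte_eal_init() has run, which is why timer_period can no longer
     * be computed in a static initializer. */
    #define TIMER_MILLISECOND (rte_get_tsc_hz() / 1000)
    #define DEFAULT_TIMER_PERIOD 10UL /* seconds */

    static void
    stats_timer_loop(void) /* hypothetical helper, for illustration only */
    {
        /* Period in TSC cycles: seconds * 1000 ms/s * cycles-per-ms. */
        uint64_t period = DEFAULT_TIMER_PERIOD * 1000 * TIMER_MILLISECOND;
        uint64_t prev_tsc = rte_rdtsc();

        for (;;) {
            uint64_t cur_tsc = rte_rdtsc();

            if (cur_tsc - prev_tsc >= period) {
                /* timer expired: print statistics here */
                prev_tsc = cur_tsc;
            }
        }
    }

Because the cycle count is derived from the measured TSC frequency rather
than hard-coded for 2 GHz, the same period in seconds holds on any core.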