Use sched_clock() instead of get_cycles().

sched_clock() is good enough here: we don't need cycle-accurate
timestamps, only a bound on the average busy-poll time, and the
shift-based usecs-to-nsecs conversion keeps the error within ~2.5%.

Remove the dependency on X86_TSC, since sched_clock() is available
on every architecture.

Signed-off-by: Eliezer Tamir <eliezer.ta...@linux.intel.com>
---
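
Note on the conversion: sysctl_net_ll_poll is in microseconds, and
USECS_TO_NS() multiplies by 1024 (<< 10) instead of 1000, so the poll
budget is overestimated by 2.4% - within the ~2.5% the header comment
allows. A standalone user-space sketch of that error bound (the value
50 is a made-up example, not from the patch):

	/* error of the shift-based usecs-to-nsecs conversion */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long us = 50;		/* hypothetical sysctl value */
		unsigned long long exact = us * 1000;	/* exact conversion */
		unsigned long long approx = us << 10;	/* the patch's fast path */

		printf("exact %llu ns, approx %llu ns (+%.1f%%)\n",
		       exact, approx,
		       100.0 * (double)(approx - exact) / (double)exact);
		return 0;
	}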

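Note on the comparison: can_poll_ll() truncates the u64 timestamps to
unsigned long, which on 32-bit wraps every ~4.3 seconds. That is still
safe, because time_after() compares via a signed difference and stays
correct as long as the real distance between the two timestamps is
under half the counter range (~2.1s for 32-bit nanoseconds), far above
any busy-poll budget. A standalone sketch of that trick (the helper
name time_after32 is made up for illustration):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* same signed-difference trick as the kernel's time_after() */
	static bool time_after32(uint32_t a, uint32_t b)
	{
		return (int32_t)(b - a) < 0;
	}

	int main(void)
	{
		uint32_t now = UINT32_MAX - 100; /* clock about to wrap */
		uint32_t end = now + 200;	 /* deadline past the wrap */

		/* prints "no": the deadline has not passed yet */
		printf("%s\n", time_after32(now, end) ? "yes" : "no");
		/* prints "yes": one tick past the deadline */
		printf("%s\n", time_after32(end + 1, end) ? "yes" : "no");
		return 0;
	}
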
 include/net/ll_poll.h |   23 +++++++++++------------
 net/Kconfig           |    1 -
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
index bc262f8..c75a611 100644
--- a/include/net/ll_poll.h
+++ b/include/net/ll_poll.h
@@ -21,10 +21,6 @@
  * e1000-devel Mailing List <e1000-de...@lists.sourceforge.net>
  */
 
-/*
- * For now this depends on CONFIG_X86_TSC
- */
-
 #ifndef _LINUX_NET_LL_POLL_H
 #define _LINUX_NET_LL_POLL_H
 
@@ -41,11 +37,14 @@ extern unsigned long sysctl_net_ll_poll __read_mostly;
 #define LL_FLUSH_BUSY          -2
 
 /* we don't mind a ~2.5% imprecision */
-#define TSC_MHZ (tsc_khz >> 10)
+#define USECS_TO_NS(us) ((us) << 10)
 
-static inline cycles_t ll_end_time(void)
+/* we can use sched_clock() because we don't care much about precision;
+ * we only care that the average busy-poll time is bounded
+ */
+static inline u64 ll_end_time(void)
 {
-       return TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) + get_cycles();
+       return sched_clock() + USECS_TO_NS(ACCESS_ONCE(sysctl_net_ll_poll));
 }
 
 static inline bool sk_valid_ll(struct sock *sk)
@@ -54,16 +53,16 @@ static inline bool sk_valid_ll(struct sock *sk)
               !need_resched() && !signal_pending(current);
 }
 
-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
-       return !time_after((unsigned long)get_cycles(),
+       return !time_after((unsigned long)sched_clock(),
                            (unsigned long)end_time);
 }
 
 static inline bool sk_poll_ll(struct sock *sk, int nonblock)
 {
-       cycles_t end_time = ll_end_time();
        const struct net_device_ops *ops;
+       u64 end_time = ll_end_time();
        struct napi_struct *napi;
        int rc = false;
 
@@ -116,7 +115,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 
 #else /* CONFIG_NET_LL_RX_POLL */
 
-static inline cycles_t ll_end_time(void)
+static inline u64 ll_end_time(void)
 {
        return 0;
 }
@@ -139,7 +138,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }
 
-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
        return false;
 }
diff --git a/net/Kconfig b/net/Kconfig
index d6a9ce6..e591668 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -245,7 +245,6 @@ config NETPRIO_CGROUP
 
 config NET_LL_RX_POLL
        bool "Low Latency Receive Poll"
-       depends on X86_TSC
        default n
        ---help---
          Support Low Latency Receive Queue Poll.

--