author	Eliezer Tamir <eliezer.tamir@linux.intel.com>	2013-06-14 16:33:35 +0300
committer	David S. Miller <davem@davemloft.net>	2013-06-17 15:48:14 -0700
commit	9a3c71aa802499e0b1db2788ccc75a56c5f00555 (patch)
tree	ac1284082bf9bdac09952fc437b594efff61b800
parent	eb6db622825b2028df74f490b8c36887cf3c2f50 (diff)
net: convert low latency sockets to sched_clock()
Use sched_clock() instead of get_cycles(). We can use sched_clock()
because we don't care much about accuracy. Remove the dependency on
X86_TSC.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/ll_poll.h	33
-rw-r--r--	net/Kconfig	1
2 files changed, 17 insertions, 17 deletions
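
A minimal userspace sketch of the deadline pattern the patch adopts (an
illustration, not kernel code: sched_clock() and time_after64() are
kernel-internal, approximated here with clock_gettime(CLOCK_MONOTONIC);
the poll_us parameter stands in for sysctl_net_ll_poll):

	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	static uint64_t sched_clock_ns(void)	/* stand-in for sched_clock() */
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}

	static uint64_t ll_end_time(unsigned int poll_us)
	{
		/* usecs -> nsecs via << 10: cheap, and only ~2.5% off */
		return ((uint64_t)poll_us << 10) + sched_clock_ns();
	}

	static bool can_poll_ll(uint64_t end_time)
	{
		/* kernel uses !time_after64(sched_clock(), end_time) */
		return sched_clock_ns() <= end_time;
	}
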
diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
index 44e2f707cb9..6930cbd943e 100644
--- a/include/net/ll_poll.h
+++ b/include/net/ll_poll.h
@@ -21,10 +21,6 @@
  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  */

-/*
- * For now this depends on CONFIG_X86_TSC
- */
-
 #ifndef _LINUX_NET_LL_POLL_H
 #define _LINUX_NET_LL_POLL_H
@@ -40,13 +36,19 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
 #define LL_FLUSH_FAILED		-1
 #define LL_FLUSH_BUSY		-2

-/* we don't mind a ~2.5% imprecision */
-#define TSC_MHZ (tsc_khz >> 10)
-
-static inline cycles_t ll_end_time(void)
+/* we can use sched_clock() because we don't care much about precision
+ * we only care that the average is bounded
+ */
+static inline u64 ll_end_time(void)
 {
-	return (cycles_t)TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll)
-	       + get_cycles();
+	u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll);
+
+	/* we don't mind a ~2.5% imprecision
+	 * sysctl_net_ll_poll is a u_int so this can't overflow
+	 */
+	end_time = (end_time << 10) + sched_clock();
+
+	return end_time;
 }

 static inline bool sk_valid_ll(struct sock *sk)
@@ -55,16 +57,15 @@ static inline bool sk_valid_ll(struct sock *sk)
 	       !need_resched() && !signal_pending(current);
 }

-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
-	return !time_after((unsigned long)get_cycles(),
-			   (unsigned long)end_time);
+	return !time_after64(sched_clock(), end_time);
 }

 static inline bool sk_poll_ll(struct sock *sk, int nonblock)
 {
-	cycles_t end_time = ll_end_time();
 	const struct net_device_ops *ops;
+	u64 end_time = ll_end_time();
 	struct napi_struct *napi;
 	int rc = false;
@@ -117,7 +118,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)

 #else /* CONFIG_NET_LL_RX_POLL */

-static inline cycles_t ll_end_time(void)
+static inline u64 ll_end_time(void)
 {
 	return 0;
 }
@@ -140,7 +141,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }

-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
 	return false;
 }
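
The << 10 shift in the new ll_end_time() converts the sysctl's
microseconds to nanoseconds by multiplying by 1024 rather than 1000,
which is where the "~2.5% imprecision" in the comment comes from: the
error is (1024 - 1000) / 1000 = 2.4%. A quick standalone check (the
50 us value is purely illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int usecs = 50;	/* hypothetical sysctl_net_ll_poll value */
		unsigned long long exact  = usecs * 1000ULL;		  /* precise us -> ns */
		unsigned long long approx = (unsigned long long)usecs << 10; /* patch's shift */

		/* prints: exact=50000 ns  approx=51200 ns  error=2.4% */
		printf("exact=%llu ns  approx=%llu ns  error=%.1f%%\n",
		       exact, approx, (approx - exact) * 100.0 / exact);
		return 0;
	}
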
diff --git a/net/Kconfig b/net/Kconfig
index d6a9ce6e180..e591668fb38 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -245,7 +245,6 @@ config NETPRIO_CGROUP

 config NET_LL_RX_POLL
 	bool "Low Latency Receive Poll"
-	depends on X86_TSC
 	default n
 	---help---
 	  Support Low Latency Receive Queue Poll.
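
Since the hunk above drops the X86_TSC dependency, the option can now be
enabled on any architecture with a usable sched_clock(). A sketch of the
corresponding .config line (CONFIG_NET_LL_RX_POLL is the symbol from the
patch; it still defaults to n, so it must be selected explicitly):

	# .config fragment: no longer gated on X86_TSC
	CONFIG_NET_LL_RX_POLL=y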