Diffstat (limited to 'net/core/gen_estimator.c')
| -rw-r--r-- | net/core/gen_estimator.c | 70 |
1 file changed, 37 insertions, 33 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109..6b5b6e7013c 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -14,7 +14,6 @@
  */
 
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -32,6 +31,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/rbtree.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 #include <net/gen_stats.h>
 
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -81,14 +81,14 @@
 struct gen_estimator
 {
         struct list_head        list;
-        struct gnet_stats_basic *bstats;
-        struct gnet_stats_rate_est      *rate_est;
+        struct gnet_stats_basic_packed  *bstats;
+        struct gnet_stats_rate_est64    *rate_est;
         spinlock_t              *stats_lock;
         int                     ewma_log;
         u64                     last_bytes;
+        u64                     avbps;
         u32                     last_packets;
         u32                     avpps;
-        u32                     avbps;
         struct rcu_head         e_rcu;
         struct rb_node          node;
 };
@@ -106,6 +106,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -115,6 +116,7 @@ static void est_timer(unsigned long arg)
         rcu_read_lock();
         list_for_each_entry_rcu(e, &elist[idx].list, list) {
                 u64 nbytes;
+                u64 brate;
                 u32 npackets;
                 u32 rate;
 
@@ -125,14 +127,14 @@ static void est_timer(unsigned long arg)
 
                 nbytes = e->bstats->bytes;
                 npackets = e->bstats->packets;
-                rate = (nbytes - e->last_bytes)<<(7 - idx);
+                brate = (nbytes - e->last_bytes)<<(7 - idx);
                 e->last_bytes = nbytes;
-                e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+                e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
                 e->rate_est->bps = (e->avbps+0xF)>>5;
 
                 rate = (npackets - e->last_packets)<<(12 - idx);
                 e->last_packets = npackets;
-                e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
+                e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
                 e->rate_est->pps = (e->avpps+0x1FF)>>10;
 skip:
                 read_unlock(&est_lock);
@@ -164,8 +166,8 @@ static void gen_add_node(struct gen_estimator *est)
 }
 
 static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
-                                    const struct gnet_stats_rate_est *rate_est)
+struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
+                                    const struct gnet_stats_rate_est64 *rate_est)
 {
         struct rb_node *p = est_root.rb_node;
 
@@ -199,10 +201,9 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
-int gen_new_estimator(struct gnet_stats_basic *bstats,
-                      struct gnet_stats_rate_est *rate_est,
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+                      struct gnet_stats_rate_est64 *rate_est,
                       spinlock_t *stats_lock,
                       struct nlattr *opt)
 {
@@ -230,6 +231,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
         est->last_packets = bstats->packets;
         est->avpps = rate_est->pps<<10;
 
+        spin_lock_bh(&est_tree_lock);
         if (!elist[idx].timer.function) {
                 INIT_LIST_HEAD(&elist[idx].list);
                 setup_timer(&elist[idx].timer, est_timer, idx);
@@ -240,18 +242,12 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 
         list_add_rcu(&est->list, &elist[idx].list);
         gen_add_node(est);
+        spin_unlock_bh(&est_tree_lock);
 
         return 0;
 }
 EXPORT_SYMBOL(gen_new_estimator);
 
-static void __gen_kill_estimator(struct rcu_head *head)
-{
-        struct gen_estimator *e = container_of(head,
-                                        struct gen_estimator, e_rcu);
-        kfree(e);
-}
-
 /**
  * gen_kill_estimator - remove a rate estimator
  * @bstats: basic statistics
@@ -259,23 +255,25 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
+ * Note : Caller should respect an RCU grace period before freeing stats_lock
  */
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
-                        struct gnet_stats_rate_est *rate_est)
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+                        struct gnet_stats_rate_est64 *rate_est)
 {
         struct gen_estimator *e;
 
+        spin_lock_bh(&est_tree_lock);
         while ((e = gen_find_node(bstats, rate_est))) {
                 rb_erase(&e->node, &est_root);
 
-                write_lock_bh(&est_lock);
+                write_lock(&est_lock);
                 e->bstats = NULL;
-                write_unlock_bh(&est_lock);
+                write_unlock(&est_lock);
 
                 list_del_rcu(&e->list);
-                call_rcu(&e->e_rcu, __gen_kill_estimator);
+                kfree_rcu(e, e_rcu);
         }
+        spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -291,8 +289,8 @@ EXPORT_SYMBOL(gen_kill_estimator);
  *
  * Returns 0 on success or a negative error code.
  */
-int gen_replace_estimator(struct gnet_stats_basic *bstats,
-                          struct gnet_stats_rate_est *rate_est,
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+                          struct gnet_stats_rate_est64 *rate_est,
                           spinlock_t *stats_lock, struct nlattr *opt)
 {
         gen_kill_estimator(bstats, rate_est);
@@ -307,11 +305,17 @@ EXPORT_SYMBOL(gen_replace_estimator);
  *
  * Returns true if estimator is active, and false if not.
  */
-bool gen_estimator_active(const struct gnet_stats_basic *bstats,
-                          const struct gnet_stats_rate_est *rate_est)
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+                          const struct gnet_stats_rate_est64 *rate_est)
 {
+        bool res;
+
         ASSERT_RTNL();
 
-        return gen_find_node(bstats, rate_est) != NULL;
+        spin_lock_bh(&est_tree_lock);
+        res = gen_find_node(bstats, rate_est) != NULL;
+        spin_unlock_bh(&est_tree_lock);
+
+        return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
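To see what the reworked average computes, here is a minimal userspace sketch (not kernel code) that mirrors the arithmetic of the patched est_timer() lines. The constant 1 Gbit/s input, idx = 2 (assumed here to give a one-second sampling period, the timer period being (HZ/4) << idx jiffies) and ewma_log = 3 are illustrative assumptions for the example, not values taken from the patch.

/* Userspace sketch of the EWMA update performed by est_timer() after this change. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t avbps = 0;        /* average rate in bytes/sec, scaled by 2^5, now 64 bit wide */
        uint64_t last_bytes = 0;   /* byte counter snapshot from the previous period */
        uint64_t bytes = 0;        /* stands in for e->bstats->bytes */
        const int ewma_log = 3;    /* each new sample is weighted by 1/2^ewma_log */
        const int idx = 2;         /* assumed interval index; (7 - idx) scales the per-period delta */

        for (int t = 0; t < 50; t++) {
                bytes += 125000000ULL;  /* 1 Gbit/s worth of bytes per one-second period */

                uint64_t brate = (bytes - last_bytes) << (7 - idx);
                last_bytes = bytes;

                /* Same form as the patched kernel line:
                 *   e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
                 * everything stays in unsigned 64 bit arithmetic, no (long) casts.
                 */
                avbps += (brate >> ewma_log) - (avbps >> ewma_log);

                printf("t=%2d  estimated rate = %llu bytes/sec\n",
                       t, (unsigned long long)((avbps + 0xF) >> 5));
        }
        return 0;
}

Run for a few dozen iterations, the printed estimate approaches 125000000 bytes/sec, i.e. 1 Gbit/s. The point of the new form is that the averaging never leaves unsigned 64 bit arithmetic: with avbps widened to u64, the old ((long)rate - (long)e->avbps) >> e->ewma_log style relies on casts to long that cannot hold a full 64 bit average on 32 bit builds, and a u32 average scaled by 2^5 was in any case limited to the "~1Gbit" ceiling the removed comment described. The NOTES text added in this diff still warns that the reported values are 32 bit and that bps can wrap above roughly 34360 Mbit/sec.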
