Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--  net/sched/sch_htb.c | 608
1 file changed, 328 insertions, 280 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 01b519d6c52..9f949abcace 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -38,6 +38,7 @@  #include <linux/workqueue.h>  #include <linux/slab.h>  #include <net/netlink.h> +#include <net/sch_generic.h>  #include <net/pkt_sched.h>  /* HTB algorithm. @@ -64,6 +65,10 @@ static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis f  module_param    (htb_hysteresis, int, 0640);  MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate"); +static int htb_rate_est = 0; /* htb classes have a default rate estimator */ +module_param(htb_rate_est, int, 0640); +MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes"); +  /* used internaly to keep status of single class */  enum htb_cmode {  	HTB_CANT_SEND,		/* class can't send and can't borrow */ @@ -71,94 +76,105 @@ enum htb_cmode {  	HTB_CAN_SEND		/* class can send */  }; -/* interior & leaf nodes; props specific to leaves are marked L: */ +struct htb_prio { +	union { +		struct rb_root	row; +		struct rb_root	feed; +	}; +	struct rb_node	*ptr; +	/* When class changes from state 1->2 and disconnects from +	 * parent's feed then we lost ptr value and start from the +	 * first child again. Here we store classid of the +	 * last valid ptr (used when ptr is NULL). +	 */ +	u32		last_ptr_id; +}; + +/* interior & leaf nodes; props specific to leaves are marked L: + * To reduce false sharing, place mostly read fields at beginning, + * and mostly written ones at the end. + */  struct htb_class {  	struct Qdisc_class_common common; -	/* general class parameters */ -	struct gnet_stats_basic_packed bstats; -	struct gnet_stats_queue qstats; -	struct gnet_stats_rate_est rate_est; -	struct tc_htb_xstats xstats;	/* our special stats */ -	int refcnt;		/* usage count of this class */ +	struct psched_ratecfg	rate; +	struct psched_ratecfg	ceil; +	s64			buffer, cbuffer;/* token bucket depth/rate */ +	s64			mbuffer;	/* max wait time */ +	u32			prio;		/* these two are used only by leaves... */ +	int			quantum;	/* but stored for parent-to-leaf return */ + +	struct tcf_proto	*filter_list;	/* class attached filters */ +	int			filter_cnt; +	int			refcnt;		/* usage count of this class */ -	/* topology */ -	int level;		/* our level (see above) */ -	unsigned int children; -	struct htb_class *parent;	/* parent class */ +	int			level;		/* our level (see above) */ +	unsigned int		children; +	struct htb_class	*parent;	/* parent class */ -	int prio;		/* these two are used only by leaves... */ -	int quantum;		/* but stored for parent-to-leaf return */ +	struct gnet_stats_rate_est64 rate_est; + +	/* +	 * Written often fields +	 */ +	struct gnet_stats_basic_packed bstats; +	struct gnet_stats_queue	qstats; +	struct tc_htb_xstats	xstats;	/* our special stats */ + +	/* token bucket parameters */ +	s64			tokens, ctokens;/* current number of tokens */ +	s64			t_c;		/* checkpoint time */  	union {  		struct htb_class_leaf { -			struct Qdisc *q; -			int deficit[TC_HTB_MAXDEPTH];  			struct list_head drop_list; +			int		deficit[TC_HTB_MAXDEPTH]; +			struct Qdisc	*q;  		} leaf;  		struct htb_class_inner { -			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */ -			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */ -			/* When class changes from state 1->2 and disconnects from -			   parent's feed then we lost ptr value and start from the -			   first child again. Here we store classid of the -			   last valid ptr (used when ptr is NULL). 
*/ -			u32 last_ptr_id[TC_HTB_NUMPRIO]; +			struct htb_prio clprio[TC_HTB_NUMPRIO];  		} inner;  	} un; -	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */ -	struct rb_node pq_node;	/* node for event queue */ -	psched_time_t pq_key; - -	int prio_activity;	/* for which prios are we active */ -	enum htb_cmode cmode;	/* current mode of the class */ +	s64			pq_key; -	/* class attached filters */ -	struct tcf_proto *filter_list; -	int filter_cnt; +	int			prio_activity;	/* for which prios are we active */ +	enum htb_cmode		cmode;		/* current mode of the class */ +	struct rb_node		pq_node;	/* node for event queue */ +	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */ +}; -	/* token bucket parameters */ -	struct qdisc_rate_table *rate;	/* rate table of the class itself */ -	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */ -	long buffer, cbuffer;	/* token bucket depth/rate */ -	psched_tdiff_t mbuffer;	/* max wait time */ -	long tokens, ctokens;	/* current number of tokens */ -	psched_time_t t_c;	/* checkpoint time */ +struct htb_level { +	struct rb_root	wait_pq; +	struct htb_prio hprio[TC_HTB_NUMPRIO];  };  struct htb_sched {  	struct Qdisc_class_hash clhash; -	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ +	int			defcls;		/* class where unclassified flows go to */ +	int			rate2quantum;	/* quant = rate / rate2quantum */ -	/* self list - roots of self generating tree */ -	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO]; -	int row_mask[TC_HTB_MAXDEPTH]; -	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO]; -	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO]; - -	/* self wait list - roots of wait PQs per row */ -	struct rb_root wait_pq[TC_HTB_MAXDEPTH]; +	/* filters for qdisc itself */ +	struct tcf_proto	*filter_list; -	/* time of nearest event per level (row) */ -	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH]; +#define HTB_WARN_TOOMANYEVENTS	0x1 +	unsigned int		warned;	/* only one warning */ +	int			direct_qlen; +	struct work_struct	work; -	int defcls;		/* class where unclassified flows go to */ +	/* non shaped skbs; let them go directly thru */ +	struct sk_buff_head	direct_queue; +	long			direct_pkts; -	/* filters for qdisc itself */ -	struct tcf_proto *filter_list; +	struct qdisc_watchdog	watchdog; -	int rate2quantum;	/* quant = rate / rate2quantum */ -	psched_time_t now;	/* cached dequeue time */ -	struct qdisc_watchdog watchdog; +	s64			now;	/* cached dequeue time */ +	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ -	/* non shaped skbs; let them go directly thru */ -	struct sk_buff_head direct_queue; -	int direct_qlen;	/* max qlen of above */ +	/* time of nearest event per level (row) */ +	s64			near_ev_cache[TC_HTB_MAXDEPTH]; -	long direct_pkts; +	int			row_mask[TC_HTB_MAXDEPTH]; -#define HTB_WARN_TOOMANYEVENTS	0x1 -	unsigned int warned;	/* only one warning */ -	struct work_struct work; +	struct htb_level	hlevel[TC_HTB_MAXDEPTH];  };  /* find class in global hash table using given handle */ @@ -182,10 +198,10 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)   * filters in qdisc and in inner nodes (if higher filter points to the inner   * node). If we end up with classid MAJOR:0 we enqueue the skb into special   * internal fifo (direct). These packets then go directly thru. If we still - * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull + * have no valid leaf we try to use MAJOR:default leaf. 
It still unsuccessful   * then finish and return direct queue.   */ -#define HTB_DIRECT (struct htb_class*)-1 +#define HTB_DIRECT ((struct htb_class *)-1L)  static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,  				      int *qerr) @@ -197,15 +213,22 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,  	int result;  	/* allow to select class by setting skb->priority to valid classid; -	   note that nfmark can be used too by attaching filter fw with no -	   rules in it */ +	 * note that nfmark can be used too by attaching filter fw with no +	 * rules in it +	 */  	if (skb->priority == sch->handle)  		return HTB_DIRECT;	/* X:0 (direct flow) selected */ -	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) -		return cl; +	cl = htb_find(skb->priority, sch); +	if (cl) { +		if (cl->level == 0) +			return cl; +		/* Start with inner filter chain if a non-leaf class is selected */ +		tcf = cl->filter_list; +	} else { +		tcf = q->filter_list; +	}  	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; -	tcf = q->filter_list;  	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {  #ifdef CONFIG_NET_CLS_ACT  		switch (result) { @@ -216,10 +239,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,  			return NULL;  		}  #endif -		if ((cl = (void *)res.class) == NULL) { +		cl = (void *)res.class; +		if (!cl) {  			if (res.classid == sch->handle)  				return HTB_DIRECT;	/* X:0 (direct flow) */ -			if ((cl = htb_find(res.classid, sch)) == NULL) +			cl = htb_find(res.classid, sch); +			if (!cl)  				break;	/* filter selected invalid classid */  		}  		if (!cl->level) @@ -268,9 +293,9 @@ static void htb_add_to_id_tree(struct rb_root *root,   * already in the queue.   */  static void htb_add_to_wait_tree(struct htb_sched *q, -				 struct htb_class *cl, long delay) +				 struct htb_class *cl, s64 delay)  { -	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL; +	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;  	cl->pq_key = q->now + delay;  	if (cl->pq_key == q->now) @@ -290,7 +315,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,  			p = &parent->rb_left;  	}  	rb_link_node(&cl->pq_node, parent, p); -	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]); +	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);  }  /** @@ -317,7 +342,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,  	while (mask) {  		int prio = ffz(~mask);  		mask &= ~(1 << prio); -		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio); +		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);  	}  } @@ -343,16 +368,18 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,  						 struct htb_class *cl, int mask)  {  	int m = 0; +	struct htb_level *hlevel = &q->hlevel[cl->level];  	while (mask) {  		int prio = ffz(~mask); +		struct htb_prio *hprio = &hlevel->hprio[prio];  		mask &= ~(1 << prio); -		if (q->ptr[cl->level][prio] == cl->node + prio) -			htb_next_rb_node(q->ptr[cl->level] + prio); +		if (hprio->ptr == cl->node + prio) +			htb_next_rb_node(&hprio->ptr); -		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio); -		if (!q->row[cl->level][prio].rb_node) +		htb_safe_rb_erase(cl->node + prio, &hprio->row); +		if (!hprio->row.rb_node)  			m |= 1 << prio;  	}  	q->row_mask[cl->level] &= ~m; @@ -376,12 +403,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)  			int prio = ffz(~m);  			m &= ~(1 << 
prio); -			if (p->un.inner.feed[prio].rb_node) +			if (p->un.inner.clprio[prio].feed.rb_node)  				/* parent already has its feed in use so that -				   reset bit in mask as parent is already ok */ +				 * reset bit in mask as parent is already ok +				 */  				mask &= ~(1 << prio); -			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); +			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);  		}  		p->prio_activity |= mask;  		cl = p; @@ -411,17 +439,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)  			int prio = ffz(~m);  			m &= ~(1 << prio); -			if (p->un.inner.ptr[prio] == cl->node + prio) { +			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {  				/* we are removing child which is pointed to from -				   parent feed - forget the pointer but remember -				   classid */ -				p->un.inner.last_ptr_id[prio] = cl->common.classid; -				p->un.inner.ptr[prio] = NULL; +				 * parent feed - forget the pointer but remember +				 * classid +				 */ +				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid; +				p->un.inner.clprio[prio].ptr = NULL;  			} -			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio); +			htb_safe_rb_erase(cl->node + prio, +					  &p->un.inner.clprio[prio].feed); -			if (!p->un.inner.feed[prio].rb_node) +			if (!p->un.inner.clprio[prio].feed.rb_node)  				mask |= 1 << prio;  		} @@ -434,14 +464,14 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)  		htb_remove_class_from_row(q, cl, mask);  } -static inline long htb_lowater(const struct htb_class *cl) +static inline s64 htb_lowater(const struct htb_class *cl)  {  	if (htb_hysteresis)  		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;  	else  		return 0;  } -static inline long htb_hiwater(const struct htb_class *cl) +static inline s64 htb_hiwater(const struct htb_class *cl)  {  	if (htb_hysteresis)  		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; @@ -462,9 +492,9 @@ static inline long htb_hiwater(const struct htb_class *cl)   * mode transitions per time unit. The speed gain is about 1/6.   */  static inline enum htb_cmode -htb_class_mode(struct htb_class *cl, long *diff) +htb_class_mode(struct htb_class *cl, s64 *diff)  { -	long toks; +	s64 toks;  	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {  		*diff = -toks; @@ -488,7 +518,7 @@ htb_class_mode(struct htb_class *cl, long *diff)   * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).   
*/  static void -htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff) +htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)  {  	enum htb_cmode new_mode = htb_class_mode(cl, diff); @@ -551,9 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)  			__skb_queue_tail(&q->direct_queue, skb);  			q->direct_pkts++;  		} else { -			kfree_skb(skb); -			sch->qstats.drops++; -			return NET_XMIT_DROP; +			return qdisc_drop(skb, sch);  		}  #ifdef CONFIG_NET_CLS_ACT  	} else if (!cl) { @@ -569,38 +597,33 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		}  		return ret;  	} else { -		cl->bstats.packets += -			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; -		cl->bstats.bytes += qdisc_pkt_len(skb);  		htb_activate(q, cl);  	}  	sch->q.qlen++; -	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; -	sch->bstats.bytes += qdisc_pkt_len(skb);  	return NET_XMIT_SUCCESS;  } -static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff) +static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)  { -	long toks = diff + cl->tokens; +	s64 toks = diff + cl->tokens;  	if (toks > cl->buffer)  		toks = cl->buffer; -	toks -= (long) qdisc_l2t(cl->rate, bytes); +	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);  	if (toks <= -cl->mbuffer)  		toks = 1 - cl->mbuffer;  	cl->tokens = toks;  } -static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff) +static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)  { -	long toks = diff + cl->ctokens; +	s64 toks = diff + cl->ctokens;  	if (toks > cl->cbuffer)  		toks = cl->cbuffer; -	toks -= (long) qdisc_l2t(cl->ceil, bytes); +	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);  	if (toks <= -cl->mbuffer)  		toks = 1 - cl->mbuffer; @@ -623,10 +646,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,  {  	int bytes = qdisc_pkt_len(skb);  	enum htb_cmode old_mode; -	long diff; +	s64 diff;  	while (cl) { -		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); +		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);  		if (cl->level >= level) {  			if (cl->level == level)  				cl->xstats.lends++; @@ -643,17 +666,15 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,  		htb_change_class_mode(q, cl, &diff);  		if (old_mode != cl->cmode) {  			if (old_mode != HTB_CAN_SEND) -				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); +				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);  			if (cl->cmode != HTB_CAN_SEND)  				htb_add_to_wait_tree(q, cl, diff);  		} -		/* update byte stats except for leaves which are already updated */ -		if (cl->level) { -			cl->bstats.bytes += bytes; -			cl->bstats.packets += skb_is_gso(skb)? -					skb_shinfo(skb)->gso_segs:1; -		} +		/* update basic stats except for leaves which are already updated */ +		if (cl->level) +			bstats_update(&cl->bstats, skb); +  		cl = cl->parent;  	}  } @@ -665,17 +686,20 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,   * next pending event (0 for no event in pq, q->now for too many events).   * Note: Applied are events whose have cl->pq_key <= q->now.   
*/ -static psched_time_t htb_do_events(struct htb_sched *q, int level, -				   unsigned long start) +static s64 htb_do_events(struct htb_sched *q, const int level, +			 unsigned long start)  {  	/* don't run for longer than 2 jiffies; 2 is used instead of -	   1 to simplify things when jiffy is going to be incremented -	   too soon */ +	 * 1 to simplify things when jiffy is going to be incremented +	 * too soon +	 */  	unsigned long stop_at = start + 2; +	struct rb_root *wait_pq = &q->hlevel[level].wait_pq; +  	while (time_before(jiffies, stop_at)) {  		struct htb_class *cl; -		long diff; -		struct rb_node *p = rb_first(&q->wait_pq[level]); +		s64 diff; +		struct rb_node *p = rb_first(wait_pq);  		if (!p)  			return 0; @@ -684,8 +708,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,  		if (cl->pq_key > q->now)  			return cl->pq_key; -		htb_safe_rb_erase(p, q->wait_pq + level); -		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); +		htb_safe_rb_erase(p, wait_pq); +		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);  		htb_change_class_mode(q, cl, &diff);  		if (cl->cmode != HTB_CAN_SEND)  			htb_add_to_wait_tree(q, cl, diff); @@ -693,7 +717,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,  	/* too much load - let's continue after a break for scheduling */  	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { -		printk(KERN_WARNING "htb: too many events!\n"); +		pr_warn("htb: too many events!\n");  		q->warned |= HTB_WARN_TOOMANYEVENTS;  	} @@ -701,7 +725,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,  }  /* Returns class->node+prio from id-tree where classe's id is >= id. NULL -   is no such one exists. */ + * is no such one exists. + */  static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,  					      u32 id)  { @@ -727,8 +752,7 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,   *   * Find leaf where current feed pointers points to.   
*/ -static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, -					 struct rb_node **pptr, u32 * pid) +static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)  {  	int i;  	struct { @@ -737,20 +761,22 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,  		u32 *pid;  	} stk[TC_HTB_MAXDEPTH], *sp = stk; -	BUG_ON(!tree->rb_node); -	sp->root = tree->rb_node; -	sp->pptr = pptr; -	sp->pid = pid; +	BUG_ON(!hprio->row.rb_node); +	sp->root = hprio->row.rb_node; +	sp->pptr = &hprio->ptr; +	sp->pid = &hprio->last_ptr_id;  	for (i = 0; i < 65535; i++) {  		if (!*sp->pptr && *sp->pid) {  			/* ptr was invalidated but id is valid - try to recover -			   the original or next ptr */ +			 * the original or next ptr +			 */  			*sp->pptr =  			    htb_id_find_next_upper(prio, sp->root, *sp->pid);  		}  		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it -				   can become out of date quickly */ +				 * can become out of date quickly +				 */  		if (!*sp->pptr) {	/* we are at right end; rewind & go up */  			*sp->pptr = sp->root;  			while ((*sp->pptr)->rb_left) @@ -765,12 +791,15 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,  			}  		} else {  			struct htb_class *cl; +			struct htb_prio *clp; +  			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);  			if (!cl->level)  				return cl; -			(++sp)->root = cl->un.inner.feed[prio].rb_node; -			sp->pptr = cl->un.inner.ptr + prio; -			sp->pid = cl->un.inner.last_ptr_id + prio; +			clp = &cl->un.inner.clprio[prio]; +			(++sp)->root = clp->feed.rb_node; +			sp->pptr = &clp->ptr; +			sp->pid = &clp->last_ptr_id;  		}  	}  	WARN_ON(1); @@ -778,16 +807,18 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,  }  /* dequeues packet at given priority and level; call only if -   you are sure that there is active class at prio/level */ -static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, -					int level) + * you are sure that there is active class at prio/level + */ +static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio, +					const int level)  {  	struct sk_buff *skb = NULL;  	struct htb_class *cl, *start; +	struct htb_level *hlevel = &q->hlevel[level]; +	struct htb_prio *hprio = &hlevel->hprio[prio]; +  	/* look initial class up in the row */ -	start = cl = htb_lookup_leaf(q->row[level] + prio, prio, -				     q->ptr[level] + prio, -				     q->last_ptr_id[level] + prio); +	start = cl = htb_lookup_leaf(hprio, prio);  	do {  next: @@ -795,9 +826,10 @@ next:  			return NULL;  		/* class can be empty - it is unlikely but can be true if leaf -		   qdisc drops packets in enqueue routine or if someone used -		   graft operation on the leaf since last dequeue; -		   simply deactivate and skip such class */ +		 * qdisc drops packets in enqueue routine or if someone used +		 * graft operation on the leaf since last dequeue; +		 * simply deactivate and skip such class +		 */  		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {  			struct htb_class *next;  			htb_deactivate(q, cl); @@ -806,9 +838,7 @@ next:  			if ((q->row_mask[level] & (1 << prio)) == 0)  				return NULL; -			next = htb_lookup_leaf(q->row[level] + prio, -					       prio, q->ptr[level] + prio, -					       q->last_ptr_id[level] + prio); +			next = htb_lookup_leaf(hprio, prio);  			if (cl == start)	/* fix start if we just deleted it */  				start = next; @@ -821,23 +851,23 @@ next:  			break;  		qdisc_warn_nonwc("htb", cl->un.leaf.q); -		
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> -				  ptr[0]) + prio); -		cl = htb_lookup_leaf(q->row[level] + prio, prio, -				     q->ptr[level] + prio, -				     q->last_ptr_id[level] + prio); +		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr: +					 &q->hlevel[0].hprio[prio].ptr); +		cl = htb_lookup_leaf(hprio, prio);  	} while (cl != start);  	if (likely(skb != NULL)) { +		bstats_update(&cl->bstats, skb);  		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);  		if (cl->un.leaf.deficit[level] < 0) {  			cl->un.leaf.deficit[level] += cl->quantum; -			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> -					  ptr[0]) + prio); +			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr : +						 &q->hlevel[0].hprio[prio].ptr);  		}  		/* this used to be after charge_class but this constelation -		   gives us slightly better performance */ +		 * gives us slightly better performance +		 */  		if (!cl->un.leaf.q->q.qlen)  			htb_deactivate(q, cl);  		htb_charge_class(q, cl, level, skb); @@ -847,39 +877,40 @@ next:  static struct sk_buff *htb_dequeue(struct Qdisc *sch)  { -	struct sk_buff *skb = NULL; +	struct sk_buff *skb;  	struct htb_sched *q = qdisc_priv(sch);  	int level; -	psched_time_t next_event; +	s64 next_event;  	unsigned long start_at;  	/* try to dequeue direct packets as high prio (!) to minimize cpu work */  	skb = __skb_dequeue(&q->direct_queue);  	if (skb != NULL) { -		sch->flags &= ~TCQ_F_THROTTLED; +ok: +		qdisc_bstats_update(sch, skb); +		qdisc_unthrottled(sch);  		sch->q.qlen--;  		return skb;  	}  	if (!sch->q.qlen)  		goto fin; -	q->now = psched_get_time(); +	q->now = ktime_to_ns(ktime_get());  	start_at = jiffies; -	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; +	next_event = q->now + 5LLU * NSEC_PER_SEC;  	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {  		/* common case optimization - skip event handler quickly */  		int m; -		psched_time_t event; +		s64 event = q->near_ev_cache[level]; -		if (q->now >= q->near_ev_cache[level]) { +		if (q->now >= event) {  			event = htb_do_events(q, level, start_at);  			if (!event) -				event = q->now + PSCHED_TICKS_PER_SEC; +				event = q->now + NSEC_PER_SEC;  			q->near_ev_cache[level] = event; -		} else -			event = q->near_ev_cache[level]; +		}  		if (next_event > event)  			next_event = event; @@ -887,20 +918,25 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)  		m = ~q->row_mask[level];  		while (m != (int)(-1)) {  			int prio = ffz(m); +  			m |= 1 << prio;  			skb = htb_dequeue_tree(q, prio, level); -			if (likely(skb != NULL)) { -				sch->q.qlen--; -				sch->flags &= ~TCQ_F_THROTTLED; -				goto fin; -			} +			if (likely(skb != NULL)) +				goto ok;  		}  	}  	sch->qstats.overlimits++; -	if (likely(next_event > q->now)) -		qdisc_watchdog_schedule(&q->watchdog, next_event); -	else +	if (likely(next_event > q->now)) { +		if (!test_bit(__QDISC_STATE_DEACTIVATED, +			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { +			ktime_t time = ns_to_ktime(next_event); +			qdisc_throttled(q->watchdog.qdisc); +			hrtimer_start(&q->watchdog.timer, time, +				      HRTIMER_MODE_ABS); +		} +	} else {  		schedule_work(&q->work); +	}  fin:  	return skb;  } @@ -935,11 +971,10 @@ static void htb_reset(struct Qdisc *sch)  {  	struct htb_sched *q = qdisc_priv(sch);  	struct htb_class *cl; -	struct hlist_node *n;  	unsigned int i;  	for (i = 0; i < q->clhash.hashsize; i++) { -		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { +		hlist_for_each_entry(cl, &q->clhash.hash[i], 
common.hnode) {  			if (cl->level)  				memset(&cl->un.inner, 0, sizeof(cl->un.inner));  			else { @@ -955,10 +990,8 @@ static void htb_reset(struct Qdisc *sch)  	qdisc_watchdog_cancel(&q->watchdog);  	__skb_queue_purge(&q->direct_queue);  	sch->q.qlen = 0; -	memset(q->row, 0, sizeof(q->row)); +	memset(q->hlevel, 0, sizeof(q->hlevel));  	memset(q->row_mask, 0, sizeof(q->row_mask)); -	memset(q->wait_pq, 0, sizeof(q->wait_pq)); -	memset(q->ptr, 0, sizeof(q->ptr));  	for (i = 0; i < TC_HTB_NUMPRIO; i++)  		INIT_LIST_HEAD(q->drops + i);  } @@ -968,6 +1001,9 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {  	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },  	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },  	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, +	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 }, +	[TCA_HTB_RATE64] = { .type = NLA_U64 }, +	[TCA_HTB_CEIL64] = { .type = NLA_U64 },  };  static void htb_work_func(struct work_struct *work) @@ -981,7 +1017,7 @@ static void htb_work_func(struct work_struct *work)  static int htb_init(struct Qdisc *sch, struct nlattr *opt)  {  	struct htb_sched *q = qdisc_priv(sch); -	struct nlattr *tb[TCA_HTB_INIT + 1]; +	struct nlattr *tb[TCA_HTB_MAX + 1];  	struct tc_htb_glob *gopt;  	int err;  	int i; @@ -989,21 +1025,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)  	if (!opt)  		return -EINVAL; -	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy); +	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);  	if (err < 0)  		return err; -	if (tb[TCA_HTB_INIT] == NULL) { -		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); +	if (!tb[TCA_HTB_INIT])  		return -EINVAL; -	} +  	gopt = nla_data(tb[TCA_HTB_INIT]); -	if (gopt->version != HTB_VER >> 16) { -		printk(KERN_ERR -		       "HTB: need tc/htb version %d (minor is %d), you have %d\n", -		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); +	if (gopt->version != HTB_VER >> 16)  		return -EINVAL; -	}  	err = qdisc_class_hash_init(&q->clhash);  	if (err < 0) @@ -1015,10 +1046,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)  	INIT_WORK(&q->work, htb_work_func);  	skb_queue_head_init(&q->direct_queue); -	q->direct_qlen = qdisc_dev(sch)->tx_queue_len; -	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */ -		q->direct_qlen = 2; - +	if (tb[TCA_HTB_DIRECT_QLEN]) +		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); +	else { +		q->direct_qlen = qdisc_dev(sch)->tx_queue_len; +		if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */ +			q->direct_qlen = 2; +	}  	if ((q->rate2quantum = gopt->rate2quantum) < 1)  		q->rate2quantum = 1;  	q->defcls = gopt->defcls; @@ -1028,12 +1062,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)  static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)  { -	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);  	struct htb_sched *q = qdisc_priv(sch);  	struct nlattr *nest;  	struct tc_htb_glob gopt; -	spin_lock_bh(root_lock); +	/* Its safe to not acquire qdisc lock. As we hold RTNL, +	 * no change can happen on the qdisc parameters. 
+	 */  	gopt.direct_pkts = q->direct_pkts;  	gopt.version = HTB_VER; @@ -1044,14 +1079,13 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)  	nest = nla_nest_start(skb, TCA_OPTIONS);  	if (nest == NULL)  		goto nla_put_failure; -	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); -	nla_nest_end(skb, nest); +	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) || +	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) +		goto nla_put_failure; -	spin_unlock_bh(root_lock); -	return skb->len; +	return nla_nest_end(skb, nest);  nla_put_failure: -	spin_unlock_bh(root_lock);  	nla_nest_cancel(skb, nest);  	return -1;  } @@ -1060,11 +1094,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,  			  struct sk_buff *skb, struct tcmsg *tcm)  {  	struct htb_class *cl = (struct htb_class *)arg; -	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);  	struct nlattr *nest;  	struct tc_htb_opt opt; -	spin_lock_bh(root_lock); +	/* Its safe to not acquire qdisc lock. As we hold RTNL, +	 * no change can happen on the class parameters. +	 */  	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;  	tcm->tcm_handle = cl->common.classid;  	if (!cl->level && cl->un.leaf.q) @@ -1076,21 +1111,25 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,  	memset(&opt, 0, sizeof(opt)); -	opt.rate = cl->rate->rate; -	opt.buffer = cl->buffer; -	opt.ceil = cl->ceil->rate; -	opt.cbuffer = cl->cbuffer; +	psched_ratecfg_getrate(&opt.rate, &cl->rate); +	opt.buffer = PSCHED_NS2TICKS(cl->buffer); +	psched_ratecfg_getrate(&opt.ceil, &cl->ceil); +	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);  	opt.quantum = cl->quantum;  	opt.prio = cl->prio;  	opt.level = cl->level; -	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); +	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) +		goto nla_put_failure; +	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && +	    nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps)) +		goto nla_put_failure; +	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && +	    nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps)) +		goto nla_put_failure; -	nla_nest_end(skb, nest); -	spin_unlock_bh(root_lock); -	return skb->len; +	return nla_nest_end(skb, nest);  nla_put_failure: -	spin_unlock_bh(root_lock);  	nla_nest_cancel(skb, nest);  	return -1;  } @@ -1102,8 +1141,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)  	if (!cl->level && cl->un.leaf.q)  		cl->qstats.qlen = cl->un.leaf.q->q.qlen; -	cl->xstats.tokens = cl->tokens; -	cl->xstats.ctokens = cl->ctokens; +	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); +	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);  	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||  	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || @@ -1177,7 +1216,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,  	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);  	if (parent->cmode != HTB_CAN_SEND) -		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level); +		htb_safe_rb_erase(&parent->pq_node, +				  &q->hlevel[parent->level].wait_pq);  	parent->level = 0;  	memset(&parent->un.inner, 0, sizeof(parent->un.inner)); @@ -1185,7 +1225,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,  	parent->un.leaf.q = new_q ? 
new_q : &noop_qdisc;  	parent->tokens = parent->buffer;  	parent->ctokens = parent->cbuffer; -	parent->t_c = psched_get_time(); +	parent->t_c = ktime_to_ns(ktime_get());  	parent->cmode = HTB_CAN_SEND;  } @@ -1196,9 +1236,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)  		qdisc_destroy(cl->un.leaf.q);  	}  	gen_kill_estimator(&cl->bstats, &cl->rate_est); -	qdisc_put_rtab(cl->rate); -	qdisc_put_rtab(cl->ceil); -  	tcf_destroy_chain(&cl->filter_list);  	kfree(cl);  } @@ -1206,24 +1243,25 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)  static void htb_destroy(struct Qdisc *sch)  {  	struct htb_sched *q = qdisc_priv(sch); -	struct hlist_node *n, *next; +	struct hlist_node *next;  	struct htb_class *cl;  	unsigned int i;  	cancel_work_sync(&q->work);  	qdisc_watchdog_cancel(&q->watchdog);  	/* This line used to be after htb_destroy_class call below -	   and surprisingly it worked in 2.4. But it must precede it -	   because filter need its target class alive to be able to call -	   unbind_filter on it (without Oops). */ +	 * and surprisingly it worked in 2.4. But it must precede it +	 * because filter need its target class alive to be able to call +	 * unbind_filter on it (without Oops). +	 */  	tcf_destroy_chain(&q->filter_list);  	for (i = 0; i < q->clhash.hashsize; i++) { -		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) +		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)  			tcf_destroy_chain(&cl->filter_list);  	}  	for (i = 0; i < q->clhash.hashsize; i++) { -		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], +		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],  					  common.hnode)  			htb_destroy_class(sch, cl);  	} @@ -1239,9 +1277,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)  	struct Qdisc *new_q = NULL;  	int last_child = 0; -	// TODO: why don't allow to delete subtree ? references ? does -	// tc subsys quarantee us that in htb_destroy it holds no class -	// refs so that we can remove children safely there ? +	/* TODO: why don't allow to delete subtree ? references ? does +	 * tc subsys guarantee us that in htb_destroy it holds no class +	 * refs so that we can remove children safely there ? +	 */  	if (cl->children || cl->filter_cnt)  		return -EBUSY; @@ -1268,7 +1307,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)  		htb_deactivate(q, cl);  	if (cl->cmode != HTB_CAN_SEND) -		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); +		htb_safe_rb_erase(&cl->pq_node, +				  &q->hlevel[cl->level].wait_pq);  	if (last_child)  		htb_parent_to_leaf(q, cl, new_q); @@ -1299,9 +1339,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  	struct htb_sched *q = qdisc_priv(sch);  	struct htb_class *cl = (struct htb_class *)*arg, *parent;  	struct nlattr *opt = tca[TCA_OPTIONS]; -	struct qdisc_rate_table *rtab = NULL, *ctab = NULL; -	struct nlattr *tb[__TCA_HTB_MAX]; +	struct nlattr *tb[TCA_HTB_MAX + 1];  	struct tc_htb_opt *hopt; +	u64 rate64, ceil64;  	/* extract all subattrs from opt attr */  	if (!opt) @@ -1318,12 +1358,16 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  	parent = parentid == TC_H_ROOT ? 
NULL : htb_find(parentid, sch);  	hopt = nla_data(tb[TCA_HTB_PARMS]); - -	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); -	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); -	if (!rtab || !ctab) +	if (!hopt->rate.rate || !hopt->ceil.rate)  		goto failure; +	/* Keeping backward compatible with rate_table based iproute2 tc */ +	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) +		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB])); + +	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) +		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB])); +  	if (!cl) {		/* new class */  		struct Qdisc *new_q;  		int prio; @@ -1349,19 +1393,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  		/* check maximal depth */  		if (parent && parent->parent && parent->parent->level < 2) { -			printk(KERN_ERR "htb: tree is too deep\n"); +			pr_err("htb: tree is too deep\n");  			goto failure;  		}  		err = -ENOBUFS; -		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) +		cl = kzalloc(sizeof(*cl), GFP_KERNEL); +		if (!cl)  			goto failure; -		err = gen_new_estimator(&cl->bstats, &cl->rate_est, -					qdisc_root_sleeping_lock(sch), -					tca[TCA_RATE] ? : &est.nla); -		if (err) { -			kfree(cl); -			goto failure; +		if (htb_rate_est || tca[TCA_RATE]) { +			err = gen_new_estimator(&cl->bstats, &cl->rate_est, +						qdisc_root_sleeping_lock(sch), +						tca[TCA_RATE] ? : &est.nla); +			if (err) { +				kfree(cl); +				goto failure; +			}  		}  		cl->refcnt = 1; @@ -1373,8 +1420,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  			RB_CLEAR_NODE(&cl->node[prio]);  		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) -		   so that can't be used inside of sch_tree_lock -		   -- thanks to Karlis Peisenieks */ +		 * so that can't be used inside of sch_tree_lock +		 * -- thanks to Karlis Peisenieks +		 */  		new_q = qdisc_create_dflt(sch->dev_queue,  					  &pfifo_qdisc_ops, classid);  		sch_tree_lock(sch); @@ -1390,7 +1438,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  			/* remove from evt list because of level change */  			if (parent->cmode != HTB_CAN_SEND) { -				htb_safe_rb_erase(&parent->pq_node, q->wait_pq); +				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);  				parent->cmode = HTB_CAN_SEND;  			}  			parent->level = (parent->parent ? parent->parent->level @@ -1404,10 +1452,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  		cl->parent = parent;  		/* set class to be in HTB_CAN_SEND state */ -		cl->tokens = hopt->buffer; -		cl->ctokens = hopt->cbuffer; -		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */ -		cl->t_c = psched_get_time(); +		cl->tokens = PSCHED_TICKS2NS(hopt->buffer); +		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); +		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */ +		cl->t_c = ktime_to_ns(ktime_get());  		cl->cmode = HTB_CAN_SEND;  		/* attach to the hash list and parent's family */ @@ -1425,20 +1473,30 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  		sch_tree_lock(sch);  	} +	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; + +	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; + +	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); +	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); +  	/* it used to be a nasty bug here, we have to check that node -	   is really leaf before changing cl->un.leaf ! */ +	 * is really leaf before changing cl->un.leaf ! 
+	 */  	if (!cl->level) { -		cl->quantum = rtab->rate.rate / q->rate2quantum; +		u64 quantum = cl->rate.rate_bytes_ps; + +		do_div(quantum, q->rate2quantum); +		cl->quantum = min_t(u64, quantum, INT_MAX); +  		if (!hopt->quantum && cl->quantum < 1000) { -			printk(KERN_WARNING -			       "HTB: quantum of class %X is small. Consider r2q change.\n", -			       cl->common.classid); +			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n", +				cl->common.classid);  			cl->quantum = 1000;  		}  		if (!hopt->quantum && cl->quantum > 200000) { -			printk(KERN_WARNING -			       "HTB: quantum of class %X is big. Consider r2q change.\n", -			       cl->common.classid); +			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n", +				cl->common.classid);  			cl->quantum = 200000;  		}  		if (hopt->quantum) @@ -1447,14 +1505,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  			cl->prio = TC_HTB_NUMPRIO - 1;  	} -	cl->buffer = hopt->buffer; -	cl->cbuffer = hopt->cbuffer; -	if (cl->rate) -		qdisc_put_rtab(cl->rate); -	cl->rate = rtab; -	if (cl->ceil) -		qdisc_put_rtab(cl->ceil); -	cl->ceil = ctab; +	cl->buffer = PSCHED_TICKS2NS(hopt->buffer); +	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); +  	sch_tree_unlock(sch);  	qdisc_class_hash_grow(sch, &q->clhash); @@ -1463,10 +1516,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,  	return 0;  failure: -	if (rtab) -		qdisc_put_rtab(rtab); -	if (ctab) -		qdisc_put_rtab(ctab);  	return err;  } @@ -1485,13 +1534,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,  	struct htb_class *cl = htb_find(classid, sch);  	/*if (cl && !cl->level) return 0; -	   The line above used to be there to prevent attaching filters to -	   leaves. But at least tc_index filter uses this just to get class -	   for other reasons so that we have to allow for it. -	   ---- -	   19.6.2002 As Werner explained it is ok - bind filter is just -	   another way to "lock" the class - unlike "get" this lock can -	   be broken by class during destroy IIUC. +	 * The line above used to be there to prevent attaching filters to +	 * leaves. But at least tc_index filter uses this just to get class +	 * for other reasons so that we have to allow for it. +	 * ---- +	 * 19.6.2002 As Werner explained it is ok - bind filter is just +	 * another way to "lock" the class - unlike "get" this lock can +	 * be broken by class during destroy IIUC.  	 */  	if (cl)  		cl->filter_cnt++; @@ -1510,14 +1559,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)  {  	struct htb_sched *q = qdisc_priv(sch);  	struct htb_class *cl; -	struct hlist_node *n;  	unsigned int i;  	if (arg->stop)  		return;  	for (i = 0; i < q->clhash.hashsize; i++) { -		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { +		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {  			if (arg->count < arg->skip) {  				arg->count++;  				continue;  | 
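The core of this change is the switch from psched-tick `long` arithmetic to 64-bit nanosecond accounting: rates become `struct psched_ratecfg`, length-to-time conversion goes through `psched_l2t_ns()`, and the default leaf quantum is derived from the 64-bit byte rate (with the new TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes). The standalone sketch below models that arithmetic in plain userspace C. It is only an illustration: the names (`model_class`, `bytes_to_ns`, `account_tokens`, `default_quantum`) are made up, and just the control flow mirrors `htb_accnt_tokens()` and the quantum clamp in `htb_change_class()` from this patch.

```c
/* tb_model.c - userspace sketch of HTB's ns-based token accounting.
 * Build: cc -o tb_model tb_model.c
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct model_class {
	uint64_t rate_bytes_ps;	/* committed rate, bytes/sec (64-bit, as with TCA_HTB_RATE64) */
	int64_t  buffer;	/* bucket depth, ns */
	int64_t  mbuffer;	/* max saved idle time, ns (the patch uses 60 s) */
	int64_t  tokens;	/* current credit, ns */
};

/* bytes -> transmission time in ns at the configured rate */
static int64_t bytes_to_ns(const struct model_class *cl, unsigned int bytes)
{
	return (int64_t)((uint64_t)bytes * NSEC_PER_SEC / cl->rate_bytes_ps);
}

/* Same flow as htb_accnt_tokens(): credit the elapsed time 'diff',
 * clamp to the bucket depth, then pay for the packet just sent. */
static void account_tokens(struct model_class *cl, unsigned int bytes, int64_t diff)
{
	int64_t toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= bytes_to_ns(cl, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;
	cl->tokens = toks;
}

/* Default leaf quantum when none is configured: rate / r2q, clamped to
 * INT_MAX and then to [1000, 200000] as in htb_change_class(). */
static int default_quantum(uint64_t rate_bytes_ps, int rate2quantum)
{
	uint64_t quantum = rate_bytes_ps / rate2quantum;

	if (quantum > INT_MAX)
		quantum = INT_MAX;
	if (quantum < 1000)
		quantum = 1000;
	if (quantum > 200000)
		quantum = 200000;
	return (int)quantum;
}

int main(void)
{
	struct model_class cl = {
		.rate_bytes_ps = 125000,			/* 1 Mbit/s */
		.buffer  = 10 * (int64_t)NSEC_PER_SEC / 1000,	/* 10 ms of burst */
		.mbuffer = 60 * (int64_t)NSEC_PER_SEC,
		.tokens  = 0,
	};

	/* 5 ms elapsed since the last checkpoint, then a 1500-byte packet:
	 * the packet costs 12 ms at 1 Mbit/s, so the class goes negative. */
	account_tokens(&cl, 1500, 5 * 1000000LL);
	printf("tokens after send: %lld ns\n", (long long)cl.tokens);
	printf("default quantum at r2q=10: %d bytes\n",
	       default_quantum(cl.rate_bytes_ps, 10));
	return 0;
}
```

On the kernel fast path the division in `bytes_to_ns()` is avoided: `psched_ratecfg_precompute()` turns the configured rate into a multiplier/shift pair so that `psched_l2t_ns()` is just a multiply and a shift, which is what the converted `htb_accnt_tokens()`/`htb_accnt_ctokens()` call.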
