diff options
Diffstat (limited to 'net/sched/sch_netem.c')
| -rw-r--r-- | net/sched/sch_netem.c | 198 | 
1 files changed, 126 insertions, 72 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index a6d788d4521..111d70fddae 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -88,10 +88,10 @@ struct netem_sched_data {  	u32 duplicate;  	u32 reorder;  	u32 corrupt; -	u32 rate; +	u64 rate;  	s32 packet_overhead;  	u32 cell_size; -	u32 cell_size_reciprocal; +	struct reciprocal_value cell_size_reciprocal;  	s32 cell_overhead;  	struct crndstate { @@ -110,6 +110,18 @@ struct netem_sched_data {  		CLG_GILB_ELL,  	} loss_model; +	enum { +		TX_IN_GAP_PERIOD = 1, +		TX_IN_BURST_PERIOD, +		LOST_IN_GAP_PERIOD, +		LOST_IN_BURST_PERIOD, +	} _4_state_model; + +	enum { +		GOOD_STATE = 1, +		BAD_STATE, +	} GE_state_model; +  	/* Correlated Loss Generation models */  	struct clgstate {  		/* state of the Markov chain */ @@ -169,7 +181,7 @@ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)  static void init_crandom(struct crndstate *state, unsigned long rho)  {  	state->rho = rho; -	state->last = net_random(); +	state->last = prandom_u32();  }  /* get_crandom - correlated random number generator @@ -182,9 +194,9 @@ static u32 get_crandom(struct crndstate *state)  	unsigned long answer;  	if (state->rho == 0)	/* no correlation */ -		return net_random(); +		return prandom_u32(); -	value = net_random(); +	value = prandom_u32();  	rho = (u64)state->rho + 1;  	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;  	state->last = answer; @@ -198,51 +210,52 @@ static u32 get_crandom(struct crndstate *state)  static bool loss_4state(struct netem_sched_data *q)  {  	struct clgstate *clg = &q->clg; -	u32 rnd = net_random(); +	u32 rnd = prandom_u32();  	/*  	 * Makes a comparison between rnd and the transition  	 * probabilities outgoing from the current state, then decides the  	 * next state and if the next packet has to be transmitted or lost.  	 
* The four states correspond to: -	 *   1 => successfully transmitted packets within a gap period -	 *   4 => isolated losses within a gap period -	 *   3 => lost packets within a burst period -	 *   2 => successfully transmitted packets within a burst period +	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period +	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period +	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period +	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period  	 */  	switch (clg->state) { -	case 1: +	case TX_IN_GAP_PERIOD:  		if (rnd < clg->a4) { -			clg->state = 4; +			clg->state = LOST_IN_BURST_PERIOD;  			return true; -		} else if (clg->a4 < rnd && rnd < clg->a1) { -			clg->state = 3; +		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { +			clg->state = LOST_IN_GAP_PERIOD;  			return true; -		} else if (clg->a1 < rnd) -			clg->state = 1; +		} else if (clg->a1 + clg->a4 < rnd) { +			clg->state = TX_IN_GAP_PERIOD; +		}  		break; -	case 2: +	case TX_IN_BURST_PERIOD:  		if (rnd < clg->a5) { -			clg->state = 3; +			clg->state = LOST_IN_GAP_PERIOD;  			return true; -		} else -			clg->state = 2; +		} else { +			clg->state = TX_IN_BURST_PERIOD; +		}  		break; -	case 3: +	case LOST_IN_GAP_PERIOD:  		if (rnd < clg->a3) -			clg->state = 2; +			clg->state = TX_IN_BURST_PERIOD;  		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { -			clg->state = 1; -			return true; +			clg->state = TX_IN_GAP_PERIOD;  		} else if (clg->a2 + clg->a3 < rnd) { -			clg->state = 3; +			clg->state = LOST_IN_GAP_PERIOD;  			return true;  		}  		break; -	case 4: -		clg->state = 1; +	case LOST_IN_BURST_PERIOD: +		clg->state = TX_IN_GAP_PERIOD;  		break;  	} @@ -264,15 +277,16 @@ static bool loss_gilb_ell(struct netem_sched_data *q)  	struct clgstate *clg = &q->clg;  	switch (clg->state) { -	case 1: -		if (net_random() < clg->a1) -			clg->state = 2; -		if (net_random() < clg->a4) +	case GOOD_STATE: +		if 
(prandom_u32() < clg->a1) +			clg->state = BAD_STATE; +		if (prandom_u32() < clg->a4)  			return true; -	case 2: -		if (net_random() < clg->a2) -			clg->state = 1; -		if (clg->a3 > net_random()) +		break; +	case BAD_STATE: +		if (prandom_u32() < clg->a2) +			clg->state = GOOD_STATE; +		if (prandom_u32() > clg->a3)  			return true;  	} @@ -358,6 +372,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche  	return PSCHED_NS2TICKS(ticks);  } +static void tfifo_reset(struct Qdisc *sch) +{ +	struct netem_sched_data *q = qdisc_priv(sch); +	struct rb_node *p; + +	while ((p = rb_first(&q->t_root))) { +		struct sk_buff *skb = netem_rb_to_skb(p); + +		rb_erase(p, &q->t_root); +		skb->next = NULL; +		skb->prev = NULL; +		kfree_skb(skb); +	} +} +  static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)  {  	struct netem_sched_data *q = qdisc_priv(sch); @@ -442,7 +471,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		     skb_checksum_help(skb)))  			return qdisc_drop(skb, sch); -		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); +		skb->data[prandom_u32() % skb_headlen(skb)] ^= +			1<<(prandom_u32() % 8);  	}  	if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) @@ -480,7 +510,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)  				now = netem_skb_cb(last)->time_to_send;  			} -			delay += packet_len_2_sched_time(skb->len, q); +			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);  		}  		cb->time_to_send = now + delay; @@ -520,6 +550,7 @@ static unsigned int netem_drop(struct Qdisc *sch)  			skb->next = NULL;  			skb->prev = NULL;  			len = qdisc_pkt_len(skb); +			sch->qstats.backlog -= len;  			kfree_skb(skb);  		}  	} @@ -609,6 +640,7 @@ static void netem_reset(struct Qdisc *sch)  	struct netem_sched_data *q = qdisc_priv(sch);  	qdisc_reset_queue(sch); +	tfifo_reset(sch);  	if (q->qdisc)  		qdisc_reset(q->qdisc);  	qdisc_watchdog_cancel(&q->watchdog); @@ -616,12 
+648,7 @@ static void netem_reset(struct Qdisc *sch)  static void dist_free(struct disttable *d)  { -	if (d) { -		if (is_vmalloc_addr(d)) -			vfree(d); -		else -			kfree(d); -	} +	kvfree(d);  }  /* @@ -662,9 +689,8 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)  	return 0;  } -static void get_correlation(struct Qdisc *sch, const struct nlattr *attr) +static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)  { -	struct netem_sched_data *q = qdisc_priv(sch);  	const struct tc_netem_corr *c = nla_data(attr);  	init_crandom(&q->delay_cor, c->delay_corr); @@ -672,47 +698,45 @@ static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)  	init_crandom(&q->dup_cor, c->dup_corr);  } -static void get_reorder(struct Qdisc *sch, const struct nlattr *attr) +static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)  { -	struct netem_sched_data *q = qdisc_priv(sch);  	const struct tc_netem_reorder *r = nla_data(attr);  	q->reorder = r->probability;  	init_crandom(&q->reorder_cor, r->correlation);  } -static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr) +static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)  { -	struct netem_sched_data *q = qdisc_priv(sch);  	const struct tc_netem_corrupt *r = nla_data(attr);  	q->corrupt = r->probability;  	init_crandom(&q->corrupt_cor, r->correlation);  } -static void get_rate(struct Qdisc *sch, const struct nlattr *attr) +static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)  { -	struct netem_sched_data *q = qdisc_priv(sch);  	const struct tc_netem_rate *r = nla_data(attr);  	q->rate = r->rate;  	q->packet_overhead = r->packet_overhead;  	q->cell_size = r->cell_size; +	q->cell_overhead = r->cell_overhead;  	if (q->cell_size)  		q->cell_size_reciprocal = reciprocal_value(q->cell_size); -	q->cell_overhead = r->cell_overhead; +	else +		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };  } 
-static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) +static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)  { -	struct netem_sched_data *q = qdisc_priv(sch);  	const struct nlattr *la;  	int rem;  	nla_for_each_nested(la, attr, rem) {  		u16 type = nla_type(la); -		switch(type) { +		switch (type) {  		case NETEM_LOSS_GI: {  			const struct tc_netem_gimodel *gi = nla_data(la); @@ -723,7 +747,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)  			q->loss_model = CLG_4_STATES; -			q->clg.state = 1; +			q->clg.state = TX_IN_GAP_PERIOD;  			q->clg.a1 = gi->p13;  			q->clg.a2 = gi->p31;  			q->clg.a3 = gi->p32; @@ -741,7 +765,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)  			}  			q->loss_model = CLG_GILB_ELL; -			q->clg.state = 1; +			q->clg.state = GOOD_STATE;  			q->clg.a1 = ge->p;  			q->clg.a2 = ge->r;  			q->clg.a3 = ge->h; @@ -765,6 +789,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {  	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },  	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },  	[TCA_NETEM_ECN]		= { .type = NLA_U32 }, +	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },  };  static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, @@ -791,6 +816,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)  	struct netem_sched_data *q = qdisc_priv(sch);  	struct nlattr *tb[TCA_NETEM_MAX + 1];  	struct tc_netem_qopt *qopt; +	struct clgstate old_clg; +	int old_loss_model = CLG_RANDOM;  	int ret;  	if (opt == NULL) @@ -801,6 +828,33 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)  	if (ret < 0)  		return ret; +	/* backup q->clg and q->loss_model */ +	old_clg = q->clg; +	old_loss_model = q->loss_model; + +	if (tb[TCA_NETEM_LOSS]) { +		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); +		if (ret) { +			q->loss_model = old_loss_model; +			return ret; +		} +	} else { +		q->loss_model = CLG_RANDOM; +	} + +	if 
(tb[TCA_NETEM_DELAY_DIST]) { +		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); +		if (ret) { +			/* recover clg and loss_model, in case of +			 * q->clg and q->loss_model were modified +			 * in get_loss_clg() +			 */ +			q->clg = old_clg; +			q->loss_model = old_loss_model; +			return ret; +		} +	} +  	sch->limit = qopt->limit;  	q->latency = qopt->latency; @@ -818,30 +872,24 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)  		q->reorder = ~0;  	if (tb[TCA_NETEM_CORR]) -		get_correlation(sch, tb[TCA_NETEM_CORR]); - -	if (tb[TCA_NETEM_DELAY_DIST]) { -		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); -		if (ret) -			return ret; -	} +		get_correlation(q, tb[TCA_NETEM_CORR]);  	if (tb[TCA_NETEM_REORDER]) -		get_reorder(sch, tb[TCA_NETEM_REORDER]); +		get_reorder(q, tb[TCA_NETEM_REORDER]);  	if (tb[TCA_NETEM_CORRUPT]) -		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); +		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);  	if (tb[TCA_NETEM_RATE]) -		get_rate(sch, tb[TCA_NETEM_RATE]); +		get_rate(q, tb[TCA_NETEM_RATE]); + +	if (tb[TCA_NETEM_RATE64]) +		q->rate = max_t(u64, q->rate, +				nla_get_u64(tb[TCA_NETEM_RATE64]));  	if (tb[TCA_NETEM_ECN])  		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); -	q->loss_model = CLG_RANDOM; -	if (tb[TCA_NETEM_LOSS]) -		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); -  	return ret;  } @@ -957,7 +1005,13 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)  	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))  		goto nla_put_failure; -	rate.rate = q->rate; +	if (q->rate >= (1ULL << 32)) { +		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate)) +			goto nla_put_failure; +		rate.rate = ~0U; +	} else { +		rate.rate = q->rate; +	}  	rate.packet_overhead = q->packet_overhead;  	rate.cell_size = q->cell_size;  	rate.cell_overhead = q->cell_overhead;  | 
