author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-05 18:44:59 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-05 18:44:59 -0800
commit     15b0669072127f282896b3bef2e9df4ec5d7264f (patch)
tree       8480e09bbc7c26cd5c9ef048b734664cb6fe76be /net
parent     c155b914651753f843445d2f860bc00137df5d52 (diff)
parent     3537d54c0c39de5738bba8d19f128478b0b96a71 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (44 commits)
  qlge: Fix sparse warnings for tx ring indexes.
  qlge: Fix sparse warning regarding rx buffer queues.
  qlge: Fix sparse endian warning in ql_hw_csum_setup().
  qlge: Fix sparse endian warning for inbound packet control block flags.
  qlge: Fix sparse warnings for byte swapping in qlge_ethool.c
  myri10ge: print MAC and serial number on probe failure
  pkt_sched: cls_u32: Fix locking in u32_change()
  iucv: fix cpu hotplug
  af_iucv: Free iucv path/socket in path_pending callback
  af_iucv: avoid left over IUCV connections from failing connects
  af_iucv: New error return codes for connect()
  net/ehea: bitops work on unsigned longs
  Revert "net: Fix for initial link state in 2.6.28"
  tcp: Kill extraneous SPLICE_F_NONBLOCK checks.
  tcp: don't mask EOF and socket errors on nonblocking splice receive
  dccp: Integrate the TFRC library with DCCP
  dccp: Clean up ccid.c after integration of CCID plugins
  dccp: Lockless integration of CCID congestion-control plugins
  qeth: get rid of extra argument after printk to dev_* conversion
  qeth: No large send using EDDP for HiperSockets.
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/can/bcm.c                        | 208
-rw-r--r--  net/core/dev.c                       |  93
-rw-r--r--  net/core/skbuff.c                    |  15
-rw-r--r--  net/dcb/dcbnl.c                      |  14
-rw-r--r--  net/dccp/Kconfig                     |   4
-rw-r--r--  net/dccp/Makefile                    |  15
-rw-r--r--  net/dccp/ackvec.h                    |  49
-rw-r--r--  net/dccp/ccid.c                      | 254
-rw-r--r--  net/dccp/ccid.h                      |  14
-rw-r--r--  net/dccp/ccids/Kconfig               |  79
-rw-r--r--  net/dccp/ccids/Makefile              |   9
-rw-r--r--  net/dccp/ccids/ccid2.c               |  22
-rw-r--r--  net/dccp/ccids/ccid3.c               |  23
-rw-r--r--  net/dccp/ccids/lib/Makefile          |   3
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c   |   3
-rw-r--r--  net/dccp/ccids/lib/packet_history.c  |   9
-rw-r--r--  net/dccp/ccids/lib/tfrc.c            |  19
-rw-r--r--  net/dccp/ccids/lib/tfrc.h            |  11
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c   |   4
-rw-r--r--  net/dccp/dccp.h                      |   2
-rw-r--r--  net/dccp/feat.c                      |   6
-rw-r--r--  net/dccp/input.c                     |   2
-rw-r--r--  net/dccp/proto.c                     |   7
-rw-r--r--  net/ipv4/tcp.c                       |   9
-rw-r--r--  net/ipv6/ipv6_sockglue.c             |   2
-rw-r--r--  net/ipv6/route.c                     |  52
-rw-r--r--  net/iucv/af_iucv.c                   |  28
-rw-r--r--  net/iucv/iucv.c                      |  18
-rw-r--r--  net/rfkill/rfkill.c                  |   4
-rw-r--r--  net/sched/cls_u32.c                  |   3
30 files changed, 488 insertions, 493 deletions
diff --git a/net/can/bcm.c b/net/can/bcm.c
index da0d426c0ce..6248ae2502c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -70,7 +70,7 @@
#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
- "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
+ "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
@@ -90,6 +90,7 @@ struct bcm_op {
unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2;
struct hrtimer timer, thrtimer;
+ struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
int count;
@@ -341,6 +342,23 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
}
}
+static void bcm_tx_timeout_tsklet(unsigned long data)
+{
+ struct bcm_op *op = (struct bcm_op *)data;
+ struct bcm_msg_head msg_head;
+
+ /* create notification to user */
+ msg_head.opcode = TX_EXPIRED;
+ msg_head.flags = op->flags;
+ msg_head.count = op->count;
+ msg_head.ival1 = op->ival1;
+ msg_head.ival2 = op->ival2;
+ msg_head.can_id = op->can_id;
+ msg_head.nframes = 0;
+
+ bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
/*
* bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
*/
@@ -352,20 +370,8 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
if (op->kt_ival1.tv64 && (op->count > 0)) {
op->count--;
- if (!op->count && (op->flags & TX_COUNTEVT)) {
- struct bcm_msg_head msg_head;
-
- /* create notification to user */
- msg_head.opcode = TX_EXPIRED;
- msg_head.flags = op->flags;
- msg_head.count = op->count;
- msg_head.ival1 = op->ival1;
- msg_head.ival2 = op->ival2;
- msg_head.can_id = op->can_id;
- msg_head.nframes = 0;
-
- bcm_send_to_user(op, &msg_head, NULL, 0);
- }
+ if (!op->count && (op->flags & TX_COUNTEVT))
+ tasklet_schedule(&op->tsklet);
}
if (op->kt_ival1.tv64 && (op->count > 0)) {
@@ -402,6 +408,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
if (op->frames_filtered > ULONG_MAX/100)
op->frames_filtered = op->frames_abs = 0;
+ /* this element is not throttled anymore */
+ data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
+
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
@@ -420,37 +429,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct can_frame *lastdata,
- struct can_frame *rxdata)
+ const struct can_frame *rxdata)
{
memcpy(lastdata, rxdata, CFSIZ);
- /* mark as used */
- lastdata->can_dlc |= RX_RECV;
+ /* mark as used and throttled by default */
+ lastdata->can_dlc |= (RX_RECV|RX_THR);
- /* throtteling mode inactive OR data update already on the run ? */
- if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
+ /* throtteling mode inactive ? */
+ if (!op->kt_ival2.tv64) {
/* send RX_CHANGED to the user immediately */
- bcm_rx_changed(op, rxdata);
+ bcm_rx_changed(op, lastdata);
return;
}
- if (hrtimer_active(&op->thrtimer)) {
- /* mark as 'throttled' */
- lastdata->can_dlc |= RX_THR;
+ /* with active throttling timer we are just done here */
+ if (hrtimer_active(&op->thrtimer))
return;
- }
- if (!op->kt_lastmsg.tv64) {
- /* send first RX_CHANGED to the user immediately */
- bcm_rx_changed(op, rxdata);
- op->kt_lastmsg = ktime_get();
- return;
- }
+ /* first receiption with enabled throttling mode */
+ if (!op->kt_lastmsg.tv64)
+ goto rx_changed_settime;
+ /* got a second frame inside a potential throttle period? */
if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
ktime_to_us(op->kt_ival2)) {
- /* mark as 'throttled' and start timer */
- lastdata->can_dlc |= RX_THR;
+ /* do not send the saved data - only start throttle timer */
hrtimer_start(&op->thrtimer,
ktime_add(op->kt_lastmsg, op->kt_ival2),
HRTIMER_MODE_ABS);
@@ -458,7 +462,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
}
/* the gap was that big, that throttling was not needed here */
- bcm_rx_changed(op, rxdata);
+rx_changed_settime:
+ bcm_rx_changed(op, lastdata);
op->kt_lastmsg = ktime_get();
}
@@ -467,7 +472,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
- struct can_frame *rxdata)
+ const struct can_frame *rxdata)
{
/*
* no one uses the MSBs of can_dlc for comparation,
@@ -511,14 +516,12 @@ static void bcm_rx_starttimer(struct bcm_op *op)
hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}
-/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
- */
-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_rx_timeout_tsklet(unsigned long data)
{
- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+ struct bcm_op *op = (struct bcm_op *)data;
struct bcm_msg_head msg_head;
+ /* create notification to user */
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -528,6 +531,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
msg_head.nframes = 0;
bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
+/*
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
+ */
+static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+{
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+ /* schedule before NET_RX_SOFTIRQ */
+ tasklet_hi_schedule(&op->tsklet);
/* no restart of the timer is done here! */
@@ -541,9 +555,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
}
/*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+{
+ if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+ if (update)
+ bcm_rx_changed(op, &op->last_frames[index]);
+ return 1;
+ }
+ return 0;
+}
+
+/*
* bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+ *
+ * update == 0 : just check if throttled data is available (any irq context)
+ * update == 1 : check and send throttled data to userspace (soft_irq context)
*/
-static int bcm_rx_thr_flush(struct bcm_op *op)
+static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
int updated = 0;
@@ -551,27 +581,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op)
int i;
/* for MUX filter we start at index 1 */
- for (i = 1; i < op->nframes; i++) {
- if ((op->last_frames) &&
- (op->last_frames[i].can_dlc & RX_THR)) {
- op->last_frames[i].can_dlc &= ~RX_THR;
- bcm_rx_changed(op, &op->last_frames[i]);
- updated++;
- }
- }
+ for (i = 1; i < op->nframes; i++)
+ updated += bcm_rx_do_flush(op, update, i);
} else {
/* for RX_FILTER_ID and simple filter */
- if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
- op->last_frames[0].can_dlc &= ~RX_THR;
- bcm_rx_changed(op, &op->last_frames[0]);
- updated++;
- }
+ updated += bcm_rx_do_flush(op, update, 0);
}
return updated;
}
+static void bcm_rx_thr_tsklet(unsigned long data)
+{
+ struct bcm_op *op = (struct bcm_op *)data;
+
+ /* push the changed data to the userspace */
+ bcm_rx_thr_flush(op, 1);
+}
+
/*
* bcm_rx_thr_handler - the time for blocked content updates is over now:
* Check for throttled data and send it to the userspace
@@ -580,7 +608,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
- if (bcm_rx_thr_flush(op)) {
+ tasklet_schedule(&op->thrtsklet);
+
+ if (bcm_rx_thr_flush(op, 0)) {
hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
return HRTIMER_RESTART;
} else {
@@ -596,48 +626,38 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
struct bcm_op *op = (struct bcm_op *)data;
- struct can_frame rxframe;
+ const struct can_frame *rxframe = (struct can_frame *)skb->data;
int i;
/* disable timeout */
hrtimer_cancel(&op->timer);
- if (skb->len == sizeof(rxframe)) {
- memcpy(&rxframe, skb->data, sizeof(rxframe));
- /* save rx timestamp */
- op->rx_stamp = skb->tstamp;
- /* save originator for recvfrom() */
- op->rx_ifindex = skb->dev->ifindex;
- /* update statistics */
- op->frames_abs++;
- kfree_skb(skb);
+ if (op->can_id != rxframe->can_id)
+ goto rx_freeskb;
- } else {
- kfree_skb(skb);
- return;
- }
-
- if (op->can_id != rxframe.can_id)
- return;
+ /* save rx timestamp */
+ op->rx_stamp = skb->tstamp;
+ /* save originator for recvfrom() */
+ op->rx_ifindex = skb->dev->ifindex;
+ /* update statistics */
+ op->frames_abs++;
if (op->flags & RX_RTR_FRAME) {
/* send reply for RTR-request (placed in op->frames[0]) */
bcm_can_tx(op);
- return;
+ goto rx_freeskb;
}
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
- bcm_rx_starttimer(op);
- return;
+ bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+ goto rx_freeskb_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, &rxframe);
- bcm_rx_starttimer(op);
- return;
+ bcm_rx_cmp_to_index(op, 0, rxframe);
+ goto rx_freeskb_starttimer;
}
if (op->nframes > 1) {
@@ -649,15 +669,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
*/
for (i = 1; i < op->nframes; i++) {
- if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
+ if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
(GET_U64(&op->frames[0]) &
GET_U64(&op->frames[i]))) {
- bcm_rx_cmp_to_index(op, i, &rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe);
break;
}
}
- bcm_rx_starttimer(op);
}
+
+rx_freeskb_starttimer:
+ bcm_rx_starttimer(op);
+rx_freeskb:
+ kfree_skb(skb);
}
/*
@@ -681,6 +705,12 @@ static void bcm_remove_op(struct bcm_op *op)
hrtimer_cancel(&op->timer);
hrtimer_cancel(&op->thrtimer);
+ if (op->tsklet.func)
+ tasklet_kill(&op->tsklet);
+
+ if (op->thrtsklet.func)
+ tasklet_kill(&op->thrtsklet);
+
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
@@ -891,6 +921,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_tx_timeout_handler;
+ /* initialize tasklet for tx countevent notification */
+ tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+ (unsigned long) op);
+
/* currently unused in tx_ops */
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1054,9 +1088,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_rx_timeout_handler;
+ /* initialize tasklet for rx timeout notification */
+ tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+ (unsigned long) op);
+
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->thrtimer.function = bcm_rx_thr_handler;
+ /* initialize tasklet for rx throttle handling */
+ tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+ (unsigned long) op);
+
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
@@ -1102,7 +1144,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
*/
op->kt_lastmsg = ktime_set(0, 0);
hrtimer_cancel(&op->thrtimer);
- bcm_rx_thr_flush(op);
+ bcm_rx_thr_flush(op, 1);
}
if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
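
[Editor's note] The bcm.c changes above follow one pattern throughout: the hrtimer callbacks run in hard-IRQ context, so the heavier work (building the notification, walking the socket path) is deferred to a tasklet that runs in softirq context. A minimal, self-contained sketch of that pattern using the same 2.6.28-era APIs as the diff; the my_* names are illustrative, not the kernel's:

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>

struct my_op {
	struct hrtimer timer;
	struct tasklet_struct tsklet;
};

/* runs in softirq context: safe place for the heavier notification work */
static void my_timeout_tsklet(unsigned long data)
{
	struct my_op *op = (struct my_op *)data;
	/* ... build and deliver the notification for op ... */
}

/* runs in hard-IRQ context: keep it short, just defer to the tasklet */
static enum hrtimer_restart my_timeout_handler(struct hrtimer *hrtimer)
{
	struct my_op *op = container_of(hrtimer, struct my_op, timer);

	tasklet_schedule(&op->tsklet);
	return HRTIMER_NORESTART;
}

static void my_op_init(struct my_op *op)
{
	hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	op->timer.function = my_timeout_handler;
	tasklet_init(&op->tsklet, my_timeout_tsklet, (unsigned long)op);
}

As bcm_remove_op() above shows, teardown must hrtimer_cancel() first and then tasklet_kill() so no deferred notification can run against a freed op.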
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da..382df6c09ee 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
@@ -2345,7 +2348,7 @@ static int napi_gro_complete(struct sk_buff *skb)
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int err = -ENOENT;
- if (!skb_shinfo(skb)->frag_list)
+ if (NAPI_GRO_CB(skb)->count == 1)
goto out;
rcu_read_lock();
@@ -2365,6 +2368,7 @@ static int napi_gro_complete(struct sk_buff *skb)
}
out:
+ skb_shinfo(skb)->gso_size = 0;
__skb_push(skb, -skb_network_offset(skb));
return netif_receive_skb(skb);
}
@@ -2383,7 +2387,7 @@ void napi_gro_flush(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_flush);
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct packet_type *ptype;
@@ -2392,6 +2396,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
int count = 0;
int same_flow;
int mac_len;
+ int free;
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
@@ -2408,6 +2413,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
skb->mac_len = mac_len;
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 0;
+ NAPI_GRO_CB(skb)->free = 0;
for (p = napi->gro_list; p; p = p->next) {
count++;
@@ -2427,6 +2433,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
goto normal;
same_flow = NAPI_GRO_CB(skb)->same_flow;
+ free = NAPI_GRO_CB(skb)->free;
if (pp) {
struct sk_buff *nskb = *pp;
@@ -2446,17 +2453,91 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
}
NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = skb->len;
skb->next = napi->gro_list;
napi->gro_list = skb;
ok:
- return NET_RX_SUCCESS;
+ return free;
normal:
- return netif_receive_skb(skb);
+ return -1;
+}
+
+int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 1:
+ kfree_skb(skb);
+ break;
+ }
+
+ return NET_RX_SUCCESS;
}
EXPORT_SYMBOL(napi_gro_receive);
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+ struct net_device *dev = napi->dev;
+ struct sk_buff *skb = napi->skb;
+ int err = NET_RX_DROP;
+
+ napi->skb = NULL;
+
+ if (!skb) {
+ skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+
+ BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = info->nr_frags;
+ memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
+
+ skb->data_len = info->len;
+ skb->len += info->len;
+ skb->truesize += info->len;
+
+ if (!pskb_may_pull(skb, ETH_HLEN))
+ goto reuse;
+
+ err = NET_RX_SUCCESS;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->ip_summed = info->ip_summed;
+ skb->csum = info->csum;
+
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 0:
+ goto out;
+ }
+
+reuse:
+ skb_shinfo(skb)->nr_frags = 0;
+
+ skb->len -= skb->data_len;
+ skb->truesize -= skb->data_len;
+ skb->data_len = 0;
+
+ __skb_pull(skb, skb_headlen(skb));
+ skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+ napi->skb = skb;
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
@@ -2535,11 +2616,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
{
INIT_LIST_HEAD(&napi->poll_list);
napi->gro_list = NULL;
+ napi->skb = NULL;
napi->poll = poll;
napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
-#ifdef CONFIG_NETPOLL
napi->dev = dev;
+#ifdef CONFIG_NETPOLL
spin_lock_init(&napi->poll_lock);
napi->poll_owner = -1;
#endif
@@ -2552,6 +2634,7 @@ void netif_napi_del(struct napi_struct *napi)
struct sk_buff *skb, *next;
list_del_init(&napi->dev_list);
+ kfree_skb(napi->skb);
for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
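
[Editor's note] napi_gro_frags() above gives drivers that receive directly into page fragments a GRO entry point that reuses a single header skb cached in napi->skb. A hedged sketch of the call pattern a driver might use — the napi_gro_fraginfo field names are taken from the hunk above, while my_driver_rx and the descriptor details are made up:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver receive path feeding one page fragment to GRO.
 * Assumes netif_napi_add() has been called; note the hunk above now
 * sets napi->dev unconditionally, not only under CONFIG_NETPOLL. */
static void my_driver_rx(struct napi_struct *napi, struct page *page,
			 unsigned int offset, unsigned int len)
{
	struct napi_gro_fraginfo info;

	info.frags[0].page        = page;
	info.frags[0].page_offset = offset;
	info.frags[0].size        = len;
	info.nr_frags  = 1;
	info.len       = len;
	info.ip_summed = CHECKSUM_UNNECESSARY;	/* e.g. verified by hardware */
	info.csum      = 0;

	/* A frame too short for an Ethernet header is recycled through
	 * napi->skb (the "reuse" label above); otherwise the skb is handed
	 * to the GRO machinery. */
	napi_gro_frags(napi, &info);
}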
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8d0abb2643..5110b359c75 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2594,6 +2594,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
if (skb_shinfo(p)->frag_list)
goto merge;
+ else if (!skb_headlen(p) && !skb_headlen(skb) &&
+ skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
+ MAX_SKB_FRAGS) {
+ memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
+ skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+
+ skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+ NAPI_GRO_CB(skb)->free = 1;
+ goto done;
+ }
headroom = skb_headroom(p);
nskb = netdev_alloc_skb(p->dev, headroom);
@@ -2613,6 +2624,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
skb_shinfo(nskb)->frag_list = p;
+ skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
skb_header_release(p);
nskb->prev = p;
@@ -2627,11 +2639,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
p = nskb;
merge:
- NAPI_GRO_CB(p)->count++;
p->prev->next = skb;
p->prev = skb;
skb_header_release(skb);
+done:
+ NAPI_GRO_CB(p)->count++;
p->data_len += skb->len;
p->truesize += skb->len;
p->len += skb->len;
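
[Editor's note] The skb_gro_receive() fast path added above merges purely page-based skbs by copying their frag descriptors into the held skb instead of chaining whole skbs on frag_list; it then sets NAPI_GRO_CB(skb)->free = 1 so napi_gro_receive() frees only the emptied skb shell. The gate for that path, restated as a standalone helper (a sketch for clarity, not kernel code):

#include <linux/skbuff.h>

/* Sketch: two GRO skbs can be merged frag-to-frag only if both hold
 * their data purely in page frags (no linear data) and the combined
 * descriptor count still fits in one shared-info frag array. */
static bool gro_can_merge_frags(const struct sk_buff *p,
				const struct sk_buff *skb)
{
	return !skb_headlen(p) && !skb_headlen(skb) &&
	       skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
	       MAX_SKB_FRAGS;
}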
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5dbfe5fdc0d..8379496de82 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -191,7 +191,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
return 0;
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
return ret;
}
@@ -272,7 +272,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
return 0;
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
return -EINVAL;
}
@@ -314,7 +314,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
return -EINVAL;
}
@@ -380,7 +380,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
return 0;
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
return -EINVAL;
}
@@ -458,7 +458,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
return 0;
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
return ret;
}
@@ -687,7 +687,7 @@ err_pg:
nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
ret = -EINVAL;
return ret;
@@ -949,7 +949,7 @@ err_bcn:
nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
- kfree(dcbnl_skb);
+ kfree_skb(dcbnl_skb);
err_out:
ret = -EINVAL;
return ret;
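
[Editor's note] Every dcbnl.c hunk above is the same one-line fix: the netlink reply buffer is a struct sk_buff, which must be released with kfree_skb() — dropping its reference count and freeing the attached data — never with plain kfree(). A minimal sketch of the corrected error-path shape; my_nl_reply and its message type are illustrative, not the dcbnl code:

#include <net/netlink.h>

static int my_nl_reply(u32 pid, u32 seq)
{
	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (!skb)
		return -ENOBUFS;

	if (!nlmsg_put(skb, pid, seq, 0 /* msg type */, 0, 0))
		goto err;

	/* ... fill in attributes; on any failure: */
	return 0;
err:
	kfree_skb(skb);	/* not kfree(): releases refcount and skb data */
	return -EMSGSIZE;
}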
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7aa2a7acc7e..ad6dffd9070 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,7 +1,6 @@
menuconfig IP_DCCP
tristate "The DCCP Protocol (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
- select IP_DCCP_CCID2
---help---
Datagram Congestion Control Protocol (RFC 4340)
@@ -25,9 +24,6 @@ config INET_DCCP_DIAG
def_tristate y if (IP_DCCP = y && INET_DIAG = y)
def_tristate m
-config IP_DCCP_ACKVEC
- bool
-
source "net/dccp/ccids/Kconfig"
menu "DCCP Kernel Hacking"
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index f4f8793aaff..2991efcc8de 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,14 +2,23 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
+#
+# CCID algorithms to be used by dccp.ko
+#
+# CCID-2 is default (RFC 4340, p. 77) and has Ack Vectors as dependency
+dccp-y += ccids/ccid2.o ackvec.o
+dccp-$(CONFIG_IP_DCCP_CCID3) += ccids/ccid3.o
+dccp-$(CONFIG_IP_DCCP_TFRC_LIB) += ccids/lib/tfrc.o \
+ ccids/lib/tfrc_equation.o \
+ ccids/lib/packet_history.o \
+ ccids/lib/loss_interval.o
+
dccp_ipv4-y := ipv4.o
# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module
obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
dccp_ipv6-y := ipv6.o
-dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o
-
obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
@@ -17,5 +26,3 @@ dccp-$(CONFIG_SYSCTL) += sysctl.o
dccp_diag-y := diag.o
dccp_probe-y := probe.o
-
-obj-y += ccids/
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 4ccee030524..45f95e55f87 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -84,7 +84,6 @@ struct dccp_ackvec_record {
struct sock;
struct sk_buff;
-#ifdef CONFIG_IP_DCCP_ACKVEC
extern int dccp_ackvec_init(void);
extern void dccp_ackvec_exit(void);
@@ -106,52 +105,4 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
{
return av->av_vec_len;
}
-#else /* CONFIG_IP_DCCP_ACKVEC */
-static inline int dccp_ackvec_init(void)
-{
- return 0;
-}
-
-static inline void dccp_ackvec_exit(void)
-{
-}
-
-static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
-{
- return NULL;
-}
-
-static inline void dccp_ackvec_free(struct dccp_ackvec *av)
-{
-}
-
-static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
- const u64 ackno, const u8 state)
-{
- return -1;
-}
-
-static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
- struct sock *sk, const u64 ackno)
-{
-}
-
-static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
- const u64 *ackno, const u8 opt,
- const u8 *value, const u8 len)
-{
- return -1;
-}
-
-static inline int dccp_insert_option_ackvec(const struct sock *sk,
- const struct sk_buff *skb)
-{
- return -1;
-}
-
-static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
-{
- return 0;
-}
-#endif /* CONFIG_IP_DCCP_ACKVEC */
#endif /* _ACKVEC_H */
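
[Editor's note] The ackvec.h hunk deletes the usual "compiled-out stub" idiom: since CCID-2, which depends on Ack Vectors, is now always built into dccp.ko (see the Makefile hunk above), the inline no-op fallbacks are dead code. For reference, the idiom being removed looks like this generic sketch (CONFIG_MY_FEATURE and my_feature_init are placeholders, not DCCP symbols):

#ifdef CONFIG_MY_FEATURE
extern int my_feature_init(void);
#else
/* Feature compiled out: callers link against a no-op and need no #ifdefs. */
static inline int my_feature_init(void)
{
	return 0;
}
#endif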
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index bcc643f992a..f3e9ba1cfd0 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -12,56 +12,70 @@
*/
#include "ccid.h"
+#include "ccids/lib/tfrc.h"
-static u8 builtin_ccids[] = {
- DCCPC_CCID2, /* CCID2 is supported by default */
-#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE)
- DCCPC_CCID3,
+static struct ccid_operations *ccids[] = {
+ &ccid2_ops,
+#ifdef CONFIG_IP_DCCP_CCID3
+ &ccid3_ops,
#endif
};
-static struct ccid_operations *ccids[CCID_MAX];
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-static atomic_t ccids_lockct = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(ccids_lock);
-
-/*
- * The strategy is: modifications ccids vector are short, do not sleep and
- * veeery rare, but read access should be free of any exclusive locks.
- */
-static void ccids_write_lock(void)
+static struct ccid_operations *ccid_by_number(const u8 id)
{
- spin_lock(&ccids_lock);
- while (atomic_read(&ccids_lockct) != 0) {
- spin_unlock(&ccids_lock);
- yield();
- spin_lock(&ccids_lock);
- }
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ccids); i++)
+ if (ccids[i]->ccid_id == id)
+ return ccids[i];
+ return NULL;
}
-static inline void ccids_write_unlock(void)
+/* check that up to @array_len members in @ccid_array are supported */
+bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
- spin_unlock(&ccids_lock);
+ while (array_len > 0)
+ if (ccid_by_number(ccid_array[--array_len]) == NULL)
+ return false;
+ return true;
}
-static inline void ccids_read_lock(void)
+/**
+ * ccid_get_builtin_ccids - Populate a list of built-in CCIDs
+ * @ccid_array: pointer to copy into
+ * @array_len: value to return length into
+ * This function allocates memory - caller must see that it is freed after use.
+ */
+int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
- atomic_inc(&ccids_lockct);