Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/8021q/vlanproc.c | 2
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/9p/trans_common.c | 4
-rw-r--r--  net/9p/util.c | 2
-rw-r--r--  net/atm/br2684.c | 2
-rw-r--r--  net/atm/lec.h | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 2
-rw-r--r--  net/bluetooth/hci_core.c | 7
-rw-r--r--  net/bluetooth/hci_event.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 1
-rw-r--r--  net/bluetooth/l2cap_sock.c | 2
-rw-r--r--  net/bluetooth/sco.c | 9
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_ioctl.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 8
-rw-r--r--  net/caif/caif_socket.c | 2
-rw-r--r--  net/caif/cfdgml.c | 6
-rw-r--r--  net/caif/cfmuxl.c | 4
-rw-r--r--  net/can/bcm.c | 9
-rw-r--r--  net/can/raw.c | 7
-rw-r--r--  net/ceph/Kconfig | 1
-rw-r--r--  net/ceph/auth.c | 8
-rw-r--r--  net/ceph/auth_x.c | 8
-rw-r--r--  net/ceph/ceph_common.c | 112
-rw-r--r--  net/ceph/crypto.c | 73
-rw-r--r--  net/ceph/crypto.h | 4
-rw-r--r--  net/ceph/mon_client.c | 2
-rw-r--r--  net/ceph/osd_client.c | 14
-rw-r--r--  net/core/dev.c | 60
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/link_watch.c | 2
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/sock.c | 10
-rw-r--r--  net/dccp/options.c | 2
-rw-r--r--  net/dccp/output.c | 2
-rw-r--r--  net/dsa/Kconfig | 4
-rw-r--r--  net/dsa/mv88e6131.c | 51
-rw-r--r--  net/dsa/mv88e6xxx.h | 2
-rw-r--r--  net/ieee802154/Makefile | 2
-rw-r--r--  net/ipv4/cipso_ipv4.c | 8
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 5
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 5
-rw-r--r--  net/ipv4/inetpeer.c | 13
-rw-r--r--  net/ipv4/ip_fragment.c | 31
-rw-r--r--  net/ipv4/ip_options.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 4
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 19
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 3
-rw-r--r--  net/ipv4/tcp_cubic.c | 9
-rw-r--r--  net/ipv4/tcp_lp.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 5
-rw-r--r--  net/ipv4/tcp_yeah.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_output.c | 8
-rw-r--r--  net/ipv4/xfrm4_policy.c | 1
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 2
-rw-r--r--  net/ipv6/route.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 5
-rw-r--r--  net/ipv6/udp.c | 5
-rw-r--r--  net/ipv6/xfrm6_output.c | 6
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/irda/af_irda.c | 3
-rw-r--r--  net/irda/irlap.c | 2
-rw-r--r--  net/irda/irlap_event.c | 8
-rw-r--r--  net/irda/irlap_frame.c | 2
-rw-r--r--  net/irda/irlmp_event.c | 2
-rw-r--r--  net/irda/irnet/irnet.h | 2
-rw-r--r--  net/irda/irqueue.c | 2
-rw-r--r--  net/irda/irttp.c | 2
-rw-r--r--  net/irda/qos.c | 8
-rw-r--r--  net/irda/timer.c | 2
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/iucv/iucv.c | 4
-rw-r--r--  net/llc/llc_input.c | 3
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/debugfs_netdev.c | 4
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 2
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/mac80211/rc80211_pid.h | 2
-rw-r--r--  net/mac80211/rx.c | 4
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 8
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r--  net/netfilter/nf_queue.c | 2
-rw-r--r--  net/netfilter/xt_DSCP.c | 2
-rw-r--r--  net/netfilter/xt_conntrack.c | 5
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 10
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 2
-rw-r--r--  net/rds/ib_send.c | 2
-rw-r--r--  net/rds/iw_cm.c | 2
-rw-r--r--  net/rds/iw_rdma.c | 2
-rw-r--r--  net/rds/iw_send.c | 2
-rw-r--r--  net/rds/send.c | 2
-rw-r--r--  net/rose/rose_route.c | 2
-rw-r--r--  net/sched/act_api.c | 2
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/em_meta.c | 2
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sched/sch_netem.c | 6
-rw-r--r--  net/sctp/associola.c | 6
-rw-r--r--  net/sctp/auth.c | 6
-rw-r--r--  net/sctp/input.c | 2
-rw-r--r--  net/sctp/output.c | 2
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/sm_make_chunk.c | 4
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 20
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/sctp/ulpevent.c | 2
-rw-r--r--  net/sctp/ulpqueue.c | 2
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r--  net/sunrpc/xprtsock.c | 4
-rw-r--r--  net/tipc/link.c | 2
-rw-r--r--  net/tipc/name_distr.c | 2
-rw-r--r--  net/unix/af_unix.c | 18
-rw-r--r--  net/wanrouter/wanproc.c | 2
-rw-r--r--  net/wireless/reg.c | 4
-rw-r--r--  net/x25/x25_facilities.c | 2
-rw-r--r--  net/x25/x25_forward.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 14
-rw-r--r--  net/xfrm/xfrm_replay.c | 5
-rw-r--r--  net/xfrm/xfrm_user.c | 9
155 files changed, 608 insertions, 316 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 7850412f52b..0eb1a886b37 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
grp->nr_vlans--;
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_leave(dev);
+
vlan_group_set_device(grp, vlan_id, NULL);
if (!grp->killall)
synchronize_net();
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e34ea9e5e28..b2ff6c8d360 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
- if (vlan->flags & VLAN_FLAG_GVRP)
- vlan_gvrp_request_leave(dev);
-
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d1314cf18ad..d940c49d168 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -54,7 +54,7 @@ static const char name_conf[] = "config";
/*
* Structures for interfacing with the /proc filesystem.
- * VLAN creates its own directory /proc/net/vlan with the folowing
+ * VLAN creates its own directory /proc/net/vlan with the following
* entries:
* config device status/configuration
* <device> entry for each device
diff --git a/net/9p/client.c b/net/9p/client.c
index 2ccbf04d37d..48b8e084e71 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -178,7 +178,7 @@ free_and_return:
* @tag: numeric id for transaction
*
* this is a simple array lookup, but will grow the
- * request_slots as necessary to accomodate transaction
+ * request_slots as necessary to accommodate transaction
* ids which did not previously have a slot.
*
* this code relies on the client spinlock to manage locks, its
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9172ab78fcb..d47880e971d 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -36,7 +36,7 @@ p9_release_req_pages(struct trans_rpage_info *rpinfo)
EXPORT_SYMBOL(p9_release_req_pages);
/**
- * p9_nr_pages - Return number of pages needed to accomodate the payload.
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
int
p9_nr_pages(struct p9_req_t *req)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(p9_nr_pages);
* @req: Request to be sent to server.
* @pdata_off: data offset into the first page after translation (gup).
* @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accomodate the payload
+ * @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
int
diff --git a/net/9p/util.c b/net/9p/util.c
index b84619b5ba2..da6af81e59d 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(p9_idpool_create);
/**
* p9_idpool_destroy - create a new per-connection id pool
- * @p: idpool to destory
+ * @p: idpool to destroy
*/
void p9_idpool_destroy(struct p9_idpool *p)
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index fce2eae8d47..2252c2085da 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -509,7 +509,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
- pr_err("tried to attach to non-existant device\n");
+ pr_err("tried to attach to non-existent device\n");
err = -ENXIO;
goto error;
}
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 9d14d196cc1..dfc07196646 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -35,7 +35,7 @@ struct lecdatahdr_8025 {
* Operations that LANE2 capable device can do. Two first functions
* are used to make the device do things. See spec 3.1.3 and 3.1.4.
*
- * The third function is intented for the MPOA component sitting on
+ * The third function is intended for the MPOA component sitting on
* top of the LANE device. The MPOA component assigns it's own function
* to (*associate_indicator)() and the LANE device will use that
* function to tell about TLVs it sees floating through.
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9ed26140a26..824e1f6e50f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -474,7 +474,7 @@ void interface_rx(struct net_device *soft_iface,
goto dropped;
skb->protocol = eth_type_trans(skb, soft_iface);
- /* should not be neccesary anymore as we use skb_pull_rcsum()
+ /* should not be necessary anymore as we use skb_pull_rcsum()
* TODO: please verify this and remove this TODO
* -- Dec 21st 2009, Simon Wunderlich */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2216620ff29..b5a8afc2be3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -587,10 +587,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
- /* Stop timer, it might be running */
- del_timer_sync(&hdev->cmd_timer);
-
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+ del_timer_sync(&hdev->cmd_timer);
hci_req_unlock(hdev);
return 0;
}
@@ -629,6 +627,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Drop last sent command */
if (hdev->sent_cmd) {
+ del_timer_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
@@ -1883,7 +1882,7 @@ static void hci_tx_task(unsigned long arg)
read_unlock(&hci_task_lock);
}
-/* ----- HCI RX task (incoming data proccessing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index cebe7588469..b2570159a04 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2387,8 +2387,6 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *s
if (!conn)
goto unlock;
- hci_conn_hold(conn);
-
conn->remote_cap = ev->capability;
conn->remote_oob = ev->oob_data;
conn->remote_auth = ev->authentication;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index ca27f3a4153..2c8dd4494c6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1051,6 +1051,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
tx_skb = skb_clone(skb, GFP_ATOMIC);
bt_cb(skb)->retries++;
control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control &= L2CAP_CTRL_SAR;
if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
control |= L2CAP_CTRL_FINAL;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f77308e63e5..299fe56a966 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -679,7 +679,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
if (opt == BT_FLUSHABLE_OFF) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- /* proceed futher only when we have l2cap_conn and
+ /* proceed further only when we have l2cap_conn and
No Flush support in the LM */
if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
err = -EINVAL;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76..94954c74f6a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
+ if (sco_pi(sk)->conn) {
+ sk->sk_state = BT_DISCONN;
+ sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+ hci_conn_put(sco_pi(sk)->conn->hcon);
+ sco_pi(sk)->conn = NULL;
+ } else
+ sco_chan_del(sk, ECONNRESET);
+ break;
+
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 88485cc74dc..cc4d3c5ab1c 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -169,7 +169,7 @@ void br_fdb_flush(struct net_bridge *br)
spin_unlock_bh(&br->hash_lock);
}
-/* Flush all entries refering to a specific port.
+/* Flush all entries referring to a specific port.
* if do_all is set also flush static entries
*/
void br_fdb_delete_by_port(struct net_bridge *br,
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e2160792e1b..0c7badad62a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -164,7 +164,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto drop;
/* If STP is turned off, then forward */
- if (p->br->stp_enabled == BR_NO_STP)
+ if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
goto forward;
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index cb43312b846..3d9fca0e337 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -106,7 +106,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
/*
* Legacy ioctl's through SIOCDEVPRIVATE
* This interface is deprecated because it was too difficult to
- * to do the translation for 32/64bit ioctl compatability.
+ * to do the translation for 32/64bit ioctl compatibility.
*/
static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eec..74ef4d4846a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
goto drop;
}
- /* Zero out the CB buffer if no options present */
- if (iph->ihl == 5) {
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (iph->ihl == 5)
return 0;
- }
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
@@ -739,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
nf_bridge->mask |= BRNF_PKT_TYPE;
}
- if (br_parse_ip_options(skb))
+ if (pf == PF_INET && br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8184c031d02..37a4034dfc2 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -852,7 +852,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTING;
sk->sk_state = CAIF_CONNECTING;
- /* Check priority value comming from socket */
+ /* Check priority value coming from socket */
/* if priority value is out of range it will be ajusted */
if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
cf_sk->conn_req.priority = CAIF_PRIO_MAX;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b..054fdb5aeb8 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+
#define container_obj(layr) ((struct cfsrvl *) layr)
#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
+ u8 packet_type;
u32 zero = 0;
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > DGM_MTU)
return -EMSGSIZE;
- cfpkt_add_head(pkt, &zero, 4);
+ cfpkt_add_head(pkt, &zero, 3);
+ packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+ cfpkt_add_head(pkt, &packet_type, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e047..24f1ffa74b0 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfmuxl *muxl = container_obj(layr);
- struct list_head *node;
+ struct list_head *node, *next;
struct cflayer *layer;
- list_for_each(node, &muxl->srvl_list) {
+ list_for_each_safe(node, next, &muxl->srvl_list) {
layer = list_entry(node, struct cflayer, node);
if (cfsrvl_phyid_match(layer, phyid))
layer->ctrlcmd(layer, ctrl, phyid);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 871a0ad5102..8a6a05e7c3c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -387,7 +387,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
}
/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
*/
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
@@ -1427,9 +1427,14 @@ static int bcm_init(struct sock *sk)
static int bcm_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct bcm_sock *bo = bcm_sk(sk);
+ struct bcm_sock *bo;
struct bcm_op *op, *next;
+ if (sk == NULL)
+ return 0;
+
+ bo = bcm_sk(sk);
+
/* remove bcm_ops, timer, rx_unregister(), etc. */
unregister_netdevice_notifier(&bo->notifier);
diff --git a/net/can/raw.c b/net/can/raw.c
index 649acfa7c70..0eb39a7fdf6 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -305,7 +305,12 @@ static int raw_init(struct sock *sk)
static int raw_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct raw_sock *ro = raw_sk(sk);
+ struct raw_sock *ro;
+
+ if (!sk)
+ return 0;
+
+ ro = raw_sk(sk);
unregister_netdevice_notifier(&ro->notifier);
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index ad424049b0c..be683f2d401 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -4,6 +4,7 @@ config CEPH_LIB
select LIBCRC32C
select CRYPTO_AES
select CRYPTO
+ select KEYS
default n
help
Choose Y or M here to include cephlib, which provides the
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 549c1f43e1d..b4bf4ac090f 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -35,12 +35,12 @@ static int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
/*
* setup, teardown.
*/
-struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
+struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key)
{
struct ceph_auth_client *ac;
int ret;
- dout("auth_init name '%s' secret '%s'\n", name, secret);
+ dout("auth_init name '%s'\n", name);
ret = -ENOMEM;
ac = kzalloc(sizeof(*ac), GFP_NOFS);
@@ -52,8 +52,8 @@ struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
ac->name = name;
else
ac->name = CEPH_AUTH_NAME_DEFAULT;
- dout("auth_init name %s secret %s\n", ac->name, secret);
- ac->secret = secret;
+ dout("auth_init name %s\n", ac->name);
+ ac->key = key;
return ac;
out:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7fd5dfcf6e1..1587dc6010c 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -662,14 +662,16 @@ int ceph_x_init(struct ceph_auth_client *ac)
goto out;
ret = -EINVAL;
- if (!ac->secret) {
+ if (!ac->key) {
pr_err("no secret set (for auth_x protocol)\n");
goto out_nomem;
}
- ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret);
- if (ret)
+ ret = ceph_crypto_key_clone(&xi->secret, ac->key);
+ if (ret < 0) {
+ pr_err("cannot clone key: %d\n", ret);
goto out_nomem;
+ }
xi->starting = true;
xi->ticket_handlers = RB_ROOT;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 95f96ab94bb..132963abc26 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -5,6 +5,8 @@
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
+#include <linux/key.h>
+#include <keys/ceph-type.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
@@ -20,6 +22,7 @@
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
+#include "crypto.h"
@@ -117,9 +120,29 @@ int ceph_compare_options(struct ceph_options *new_opt,
if (ret)
return ret;
- ret = strcmp_null(opt1->secret, opt2->secret);
- if (ret)
- return ret;
+ if (opt1->key && !opt2->key)
+ return -1;
+ if (!opt1->key && opt2->key)
+ return 1;
+ if (opt1->key && opt2->key) {
+ if (opt1->key->type != opt2->key->type)
+ return -1;
+ if (opt1->key->created.tv_sec != opt2->key->created.tv_sec)
+ return -1;
+ if (opt1->key->created.tv_nsec != opt2->key->created.tv_nsec)
+ return -1;
+ if (opt1->key->len != opt2->key->len)
+ return -1;
+ if (opt1->key->key && !opt2->key->key)
+ return -1;
+ if (!opt1->key->key && opt2->key->key)
+ return 1;
+ if (opt1->key->key && opt2->key->key) {
+ ret = memcmp(opt1->key->key, opt2->key->key, opt1->key->len);
+ if (ret)
+ return ret;
+ }
+ }
/* any matching mon ip implies a match */
for (i = 0; i < opt1->num_mon; i++) {
@@ -176,6 +199,7 @@ enum {
Opt_fsid,
Opt_name,
Opt_secret,
+ Opt_key,
Opt_ip,
Opt_last_string,
/* string args above */
@@ -192,6 +216,7 @@ static match_table_t opt_tokens = {
{Opt_fsid, "fsid=%s"},
{Opt_name, "name=%s"},
{Opt_secret, "secret=%s"},
+ {Opt_key, "key=%s"},
{Opt_ip, "ip=%s"},
/* string args above */
{Opt_noshare, "noshare"},
@@ -203,11 +228,56 @@ void ceph_destroy_options(struct ceph_options *opt)
{
dout("destroy_options %p\n", opt);
kfree(opt->name);
- kfree(opt->secret);
+ if (opt->key) {
+ ceph_crypto_key_destroy(opt->key);
+ kfree(opt->key);
+ }
kfree(opt);
}
EXPORT_SYMBOL(ceph_destroy_options);
+/* get secret from key store */
+static int get_secret(struct ceph_crypto_key *dst, const char *name) {
+ struct key *ukey;
+ int key_err;
+ int err = 0;
+ struct ceph_crypto_key *ckey;
+
+ ukey = request_key(&key_type_ceph, name, NULL);
+ if (!ukey || IS_ERR(ukey)) {
+ /* request_key errors don't map nicely to mount(2)
+ errors; don't even try, but still printk */
+ key_err = PTR_ERR(ukey);
+ switch (key_err) {
+ case -ENOKEY:
+ pr_warning("ceph: Mount failed due to key not found: %s\n", name);
+ break;
+ case -EKEYEXPIRED:
+ pr_warning("ceph: Mount failed due to expired key: %s\n", name);
+ break;
+ case -EKEYREVOKED:
+ pr_warning("ceph: Mount failed due to revoked key: %s\n", name);
+ break;
+ default:
+ pr_warning("ceph: Mount failed due to unknown key error"
+ " %d: %s\n", key_err, name);
+ }
+ err = -EPERM;
+ goto out;
+ }
+
+ ckey = ukey->payload.data;
+ err = ceph_crypto_key_clone(dst, ckey);
+ if (err)
+ goto out_key;
+ /* pass through, err is 0 */
+
+out_key:
+ key_put(ukey);
+out:
+ return err;
+}
+
int ceph_parse_options(struct ceph_options **popt, char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
@@ -295,9 +365,24 @@ int ceph_parse_options(struct ceph_options **popt, char *options,
GFP_KERNEL);
break;
case Opt_secret:
- opt->secret = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
+ opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
+ if (!opt->key) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = ceph_crypto_key_unarmor(opt->key, argstr[0].from);
+ if (err < 0)
+ goto out;
+ break;
+ case Opt_key:
+ opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
+ if (!opt->key) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = get_secret(opt->key, argstr[0].from);
+ if (err < 0)
+ goto out;
break;
/* misc */
@@ -394,8 +479,8 @@ void ceph_destroy_client(struct ceph_client *client)
ceph_osdc_stop(&client->osdc);
/*
- * make sure mds and osd connections close out before destroying
- * the auth module, which is needed to free those connections'
+ * make sure osd connections close out before destroying the
+ * auth module, which is needed to free those connections'
* ceph_authorizers.
*/
ceph_msgr_flush();
@@ -496,10 +581,14 @@ static int __init init_ceph_lib(void)
if (ret < 0)
goto out;
- ret = ceph_msgr_init();
+ ret = ceph_crypto_init();
if (ret < 0)
goto out_debugfs;
+ ret = ceph_msgr_init();
+ if (ret < 0)
+ goto out_crypto;
+
pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
@@ -507,6 +596,8 @@ static int __init init_ceph_lib(void)
return 0;
+out_crypto:
+ ceph_crypto_shutdown();
out_debugfs:
ceph_debugfs_cleanup();
out:
@@ -517,6 +608,7 @@ static void __exit exit_ceph_lib(void)
{
dout("exit_ceph_lib\n");
ceph_msgr_exit();
+ ceph_crypto_shutdown();
ceph_debugfs_cleanup();
}
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 7b505b0c983..5a8009c9e0c 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -5,10 +5,23 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+#include <linux/key-type.h>
+#include <keys/ceph-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ const struct ceph_crypto_key *src)
+{
+ memcpy(dst, src, sizeof(struct ceph_crypto_key));
+ dst->key = kmalloc(src->len, GFP_NOFS);
+ if (!dst->key)
+ return -ENOMEM;
+ memcpy(dst->key, src->key, src->len);
+ return 0;
+}
+
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
@@ -410,3 +423,63 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
return -EINVAL;
}
}
+
+int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
+{
+ struct ceph_crypto_key *ckey;
+ int ret;
+ void *p;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 32767 || !data)
+ goto err;
+
+ ret = key_payload_reserve(key, datalen);
+ if (ret < 0)
+ goto err;
+
+ ret = -ENOMEM;
+ ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
+ if (!ckey)
+ goto err;
+
+ /* TODO ceph_crypto_key_decode should really take const input */
+ p = (void*)data;
+ ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
+ if (ret < 0)
+ goto err_ckey;
+
+ key->payload.data = ckey;
+ return 0;
+
+err_ckey:
+ kfree(ckey);
+err:
+ return ret;
+}
+
+int ceph_key_match(const struct key *key, const void *description)
+{
+ return strcmp(key->description, description) == 0;
+}
+
+void ceph_key_destroy(struct key *key) {
+ struct ceph_crypto_key *ckey = key->payload.data;
+
+ ceph_crypto_key_destroy(ckey);
+}
+
+struct key_type key_type_ceph = {
+ .name = "ceph",
+ .instantiate = ceph_key_instantiate,
+ .match = ceph_key_match,
+ .destroy = ceph_key_destroy,
+};
+
+int ceph_crypto_init(void) {
+ return register_key_type(&key_type_ceph);
+}
+
+void ceph_crypto_shutdown(void) {
+ unregister_key_type(&key_type_ceph);
+}
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index f9eccace592..1919d1550d7 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -19,6 +19,8 @@ static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
kfree(key->key);
}
+extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ const struct ceph_crypto_key *src);
extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
void **p, void *end);
extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
@@ -40,6 +42,8 @@ extern int ceph_encrypt2(struct ceph_crypto_key *secret,
void *dst, size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len);
+extern int ceph_crypto_init(void);
+extern void ceph_crypto_shutdown(void);
/* armor.c */
extern int ceph_armor(char *dst, const char *src, const char *end);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 8a079399174..cbe31fa4550 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -759,7 +759,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
/* authentication */
monc->auth = ceph_auth_init(cl->options->name,
- cl->options->secret);
+ cl->options->key);
if (IS_ERR(monc->auth))
return PTR_ERR(monc->auth);
monc->auth->want_keys =
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 02212ed5085..50af02737a3 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -837,8 +837,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
- if (list_empty(&req->r_osd_item) &&
- list_empty(&req->r_linger_item))
+ if (list_empty(&req->r_linger_item))
req->r_osd = NULL;
}
@@ -883,7 +882,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
- req->r_osd = NULL;
+ if (list_empty(&req->r_osd_item))
+ req->r_osd = NULL;
}
}
@@ -917,7 +917,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
/*
* Pick an osd (the first 'up' osd in the pg), allocate the osd struct
* (as needed), and set the request r_osd appropriately. If there is
- * no up osd, set r_osd to NULL. Move the request to the appropiate list
+ * no up osd, set r_osd to NULL. Move the request to the appropriate list
* (unsent, homeless) or leave on in-flight lru.
*
* Return 0 if unchanged, 1 if changed, or negative on error.
@@ -1602,11 +1602,11 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
cookie, ver, event);
if (event) {
event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
- INIT_WORK(&event_work->work, do_event_work);
if (!event_work) {
dout("ERROR: could not allocate event_work\n");
goto done_err;
}
+ INIT_WORK(&event_work->work, do_event_work);
event_work->event = event;
event_work->ver = ver;
event_work->notify_id = notify_id;
@@ -1672,7 +1672,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
if (req->r_sent == 0) {
rc = __map_request(osdc, req);
if (rc < 0)
- return rc;
+ goto out_unlock;
if (req->r_osd == NULL) {
dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_next_osdmap(&osdc->client->monc);
@@ -1689,6 +1689,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
}
}
}
+
+out_unlock:
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
return rc;
diff --git a/net/core/dev.c b/net/core/dev.c
index 3da9fb06d47..30a4078b3fa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1007,7 +1007,7 @@ rollback:
}
write_lock_bh(&dev_base_lock);
- hlist_del(&dev->name_hlist);
+ hlist_del_rcu(&dev->name_hlist);
write_unlock_bh(&dev_base_lock);
synchronize_rcu();
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head)
*/
int dev_close(struct net_device *dev)
{
- LIST_HEAD(single);
+ if (dev->flags & IFF_UP) {
+ LIST_HEAD(single);
- list_add(&dev->unreg_list, &single);
- dev_close_many(&single);
- list_del(&single);
+ list_add(&dev->unreg_list, &single);
+ dev_close_many(&single);
+ list_del(&single);
+ }
return 0;
}
EXPORT_SYMBOL(dev_close);
@@ -2091,7 +2093,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
u32 features;
/*
- * If device doesnt need skb->dst, release it right now while
+ * If device doesn't need skb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2151,7 +2153,7 @@ gso:
nskb->next = NULL;
/*
- * If device doesnt need nskb->dst, release it right now while
+ * If device doesn't need nskb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2970,8 +2972,8 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
* when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
* a compare and 2 stores extra right now if we dont have it on
* but have CONFIG_NET_CLS_ACT
- * NOTE: This doesnt stop any functionality; if you dont have
- * the ingress scheduler, you just cant add policies on ingress.
+ * NOTE: This doesn't stop any functionality; if you dont have
+ * the ingress scheduler, you just can't add policies on ingress.
*
*/
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
@@ -3800,7 +3802,7 @@ static void net_rx_action(struct softirq_action *h)
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
- * accidently calling ->poll() when NAPI is not scheduled.
+ * accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
@@ -4773,7 +4775,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
* is never reached
*/
WARN_ON(1);
- err = -EINVAL;
+ err = -ENOTTY;
break;
}
@@ -5041,7 +5043,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
/* Set the per device memory buffer space.
* Not applicable in our case */
case SIOCSIFLINK:
- return -EINVAL;
+ return -ENOTTY;
/*
* Unknown or private ioctl.
@@ -5062,7 +5064,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
/* Take care of Wireless Extensions */
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
return wext_handle_ioctl(net, &ifr, cmd, arg);
- return -EINVAL;
+ return -ENOTTY;
}
}
@@ -5184,33 +5186,37 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- netdev_info(dev, "mixed HW and IP checksum settings.\n");
+ netdev_warn(dev, "mixed HW and IP checksum settings.\n");
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
if ((features & NETIF_F_NO_CSUM) &&
(features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- netdev_info(dev, "mixed no checksumming and other settings.\n");
+ netdev_warn(dev, "mixed no checksumming and other settings.\n");
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
}
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
- netdev_info(dev,
- "Dropping NETIF_F_SG since no checksum feature.\n");
+ netdev_dbg(dev,
+ "Dropping NETIF_F_SG since no checksum feature.\n");
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
- if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
- features &= ~NETIF_F_TSO;
+ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+ netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
+ features &= ~NETIF_F_ALL_TSO;
}
+ /* TSO ECN requires that TSO is present as well. */
+ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+ features &= ~NETIF_F_TSO_ECN;
+
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+ netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
features &= ~NETIF_F_GSO;
}
@@ -5220,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- netdev_info(dev,
+ netdev_dbg(dev,
"Dropping NETIF_F_UFO since no checksum offload features.\n");
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
- netdev_info(dev,
+ netdev_dbg(dev,
"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
features &= ~NETIF_F_UFO;
}
@@ -5408,12 +5414,6 @@ int register_netdevice(struct net_device *dev)
dev->features |= NETIF_F_SOFT_FEATURES;
dev->wanted_features = dev->features & dev->hw_features;
- /* Avoid warning from netdev_fix_features() for GSO without SG */
- if (!(dev->wanted_features & NETIF_F_SG)) {
- dev->wanted_features &= ~NETIF_F_GSO;
- dev->features &= ~NETIF_F_GSO;
- }
-
/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
* vlan_dev_init() will do the dev->features check, so these features
* are enabled only if supported by underlying device.
@@ -6336,7 +6336,7 @@ static void __net_exit default_device_exit(struct net *net)
if (dev->rtnl_link_ops)
continue;
- /* Push remaing network devices to init_net */
+ /* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 232b1873bb2..afb8afb066b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -425,7 +425,7 @@ EXPORT_SYMBOL(sk_run_filter);
* As we dont want to clear mem[] array for each packet going through
* sk_run_filter(), we check that filter loaded by user never try to read
* a cell if not previously written, and we check all branches to be sure
- * a malicious user doesnt try to abuse us.
+ * a malicious user doesn't try to abuse us.
*/
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 01a1101b593..a7b34213186 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -129,7 +129,7 @@ static void linkwatch_schedule_work(int urgent)
if (!cancel_delayed_work(&linkwatch_work))
return;
- /* Otherwise we reschedule it again for immediate exection. */
+ /* Otherwise we reschedule it again for immediate execution. */
schedule_delayed_work(&linkwatch_work, 0);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 49f7ea5b4c7..d7c4bb4b182 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL_GPL(__rtnl_register);
* as failure of this function is very unlikely, it can only happen due
* to lack of memory when allocating the chain to store all message
* handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continueing.
+ * of memory implies no sense in continuing.
*/
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit)
@@ -1440,7 +1440,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
errout:
if (err < 0 && modified && net_ratelimit())
printk(KERN_WARNING "A link change request failed with "
- "some changes comitted already. Interface %s may "
+ "some changes committed already. Interface %s may "
"have been left with an inconsistent configuration, "
"please check.\n", dev->name);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 801dd08908f..7ebeed0a877 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2267,7 +2267,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
- * Note 1: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy seqeuental
* reads of potentially non linear data.
*
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dfed792434..6e819780c23 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -215,7 +215,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
-/* Maximal space eaten by iovec or ancilliary data plus some space */
+/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
@@ -1175,7 +1175,7 @@ static void __sk_free(struct sock *sk)
void sk_free(struct sock *sk)
{
/*
- * We substract one from sk_wmem_alloc and can know if
+ * We subtract one from sk_wmem_alloc and can know if
* some packets are still in some tx queue.
* If not null, sock_wfree() will call __sk_free(sk) later
*/
@@ -1185,10 +1185,10 @@ void sk_free(struct sock *sk)
EXPORT_SYMBOL(sk_free);
/*
- * Last sock_put should drop referrence to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking referrence to stopping namespace
+ * Last sock_put should drop reference to sk->sk_net. It has already
+ * been dropped in sk_change_net. Taking reference to stopping namespace
* is not an option.
- * Take referrence to a socket to remove it from hash _alive_ and after that
+ * Take reference to a socket to remove it from hash _alive_ and after that
* destroy it in the context of init_net.
*/
void sk_release_kernel(struct sock *sk)
diff --git a/net/dccp/options.c b/net/dccp/options.c
index f06ffcfc8d7..4b2ab657ac8 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */
break;
+ if (len == 0)
+ goto out_invalid_option;
rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
*value, value + 1, len - 1);
if (rc)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 784d3021054..136d41cbcd0 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -143,7 +143,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
}
/**
- * dccp_determine_ccmps - Find out about CCID-specfic packet-size limits
+ * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
* We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
* since the RX CCID is restricted to feedback packets (Acks), which are small
* in comparison with the data traffic. A value of 0 means "no current CCMPS".
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 87bb5f4de0e..c53ded2a98d 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -41,12 +41,12 @@ config NET_DSA_MV88E6XXX_NEED_PPU
default n
config NET_DSA_MV88E6131
- bool "Marvell 88E6095/6095F/6131 ethernet switch chip support"
+ bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
select NET_DSA_MV88E6XXX
select NET_DSA_MV88E6XXX_NEED_PPU
select NET_DSA_TAG_DSA
---help---
- This enables support for the Marvell 88E6095/6095F/6131
+ This enables support for the Marvell 88E6085/6095/6095F/6131
ethernet switch chips.
config NET_DSA_MV88E6123_61_65
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index bb2b41bc854..45f7411e90b 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
#include "dsa_priv.h"
#include "mv88e6xxx.h"
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
{
int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
ret &= 0xfff0;
- if (ret == 0x0950)
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == 0x1060)
+ if (ret == ID_6131)
return "Marvell 88E6131";
}
@@ -124,7 +133,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
- * port at the higest priority.
+ * port at the highest priority.
*/
REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
int addr = REG_PORT(p);
u16 val;
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
+ * (100 Mb/s on 6085) full duplex.
*/
if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
else
REG_WRITE(addr, 0x01, 0x0003);
@@ -194,8 +207,15 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* mode, but do not enable forwarding of unknown unicasts.
*/
val = 0x0433;
- if (p == dsa_upstream_port(ds))
+ if (p == dsa_upstream_port(ds)) {
val |= 0x0104;
+ /*
+ * On 6085, unknown multicast forward is controlled
+ * here rather than in Port Control 2 register.
+ */
+ if (ps->id == ID_6085)
+ val |= 0x0008;
+ }
if (ds->dsa_port_mask & (1 << p))
val |= 0x0100;
REG_WRITE(addr, 0x04, val);
@@ -238,10 +258,19 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* If this is the upstream port for this switch, enable
* forwarding of unknown multicast addresses.
*/
- val = 0x0080 | dsa_upstream_port(ds);
- if (p == dsa_upstream_port(ds))
- val |= 0x0040;
- REG_WRITE(addr, 0x08, val);
+ if (ps->id == ID_6085)
+ /*
+ * on 6085, bits 3:0 are reserved, bit 6 control ARP
+ * mirroring, and multicast forward is handled in
+ * Port Control register.
+ */
+ REG_WRITE(addr, 0x08, 0x0080);
+ else {
+ val = 0x0080 | dsa_upstream_port(ds);
+ if (p == dsa_upstream_port(ds))
+ val |= 0x0040;
+ REG_WRITE(addr, 0x08, val);
+ }
/*
* Rate Control: disable ingress rate limiting.
@@ -286,6 +315,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
mv88e6xxx_ppu_state_init(ds);
mutex_init(&ps->stats_mutex);
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
ret = mv88e6131_switch_reset(ds);
if (ret < 0)
return ret;
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1..61156ca26a0 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
* Hold this mutex over snapshot + dump sequences.
*/
struct mutex stats_mutex;
+
+ int id; /* switch product id */
};
struct mv88e6xxx_hw_stat {
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d3358285..5761185f884 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 094e150c626..a0af7ea8787 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -112,7 +112,7 @@ int cipso_v4_rbm_strictvalid = 1;
/* The maximum number of category ranges permitted in the ranged category tag
* (tag #5). You may note that the IETF draft states that the maximum number
* of category ranges is 7, but if the low end of the last category range is
- * zero then it is possibile to fit 8 category ranges because the zero should
+ * zero then it is possible to fit 8 category ranges because the zero should
* be omitted. */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8
@@ -438,7 +438,7 @@ cache_add_failure:
*
* Description:
* Search the DOI definition list for a DOI definition with a DOI value that
- * matches @doi. The caller is responsibile for calling rcu_read_[un]lock().
+ * matches @doi. The caller is responsible for calling rcu_read_[un]lock().
* Returns a pointer to the DOI definition on success and NULL on failure.
*/
static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
@@ -1293,7 +1293,7 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
return ret_val;
/* This will send packets using the "optimized" format when
- * possibile as specified in section 3.4.2.6 of the
+ * possible as specified in section 3.4.2.6 of the
* CIPSO draft. */
if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
tag_len = 14;
@@ -1752,7 +1752,7 @@ validate_return:
}
/**
- * cipso_v4_error - Send the correct reponse for a bad packet
+ * cipso_v4_error - Send the correct response for a bad packet
* @skb: the packet
* @error: the error code
* @gateway: CIPSO gateway flag
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5345b0bee6d..cd9ca0811cf 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1680,7 +1680,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
return;
cnf->sysctl = NULL;
- unregister_sysctl_table(t->sysctl_header);
+ unregister_net_sysctl_table(t->sysctl_header);
kfree(t->dev_name);
kfree(t);
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b92c86f6e9b..5fe9b8b41df 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -12,7 +12,7 @@
*
* Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
*
- * This work is based on the LPC-trie which is originally descibed in:
+ * This work is based on the LPC-trie which is originally described in:
*
* An experimental study of compression methods for dynamic tries
* Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
@@ -1978,9 +1978,6 @@ struct fib_table *fib_trie_table(u32 id)
t = (struct trie *) tb->tb_data;
memset(t, 0, sizeof(*t));
- if (id == RT_TABLE_LOCAL)
- pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
-
return tb;
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a91dc161108..e5f8a71d3a2 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -704,7 +704,7 @@ static void icmp_unreach(struct sk_buff *skb)
*/
/*
- * Check the other end isnt violating RFC 1122. Some routers send
+ * Check the other end isn't violating RFC 1122. Some routers send
* bogus responses to broadcast frames. If you see this message
* first check your netmask matches at both ends, if it does then
* get the other vendor to fix their kit.
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7..38f23e721b8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+ sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a..9df4e635fb5 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
}
/* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
int do_free;
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
}
/* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer *p = NULL;
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
* happen because of entry limits in route cache. */
return -1;
- unlink_from_pool(p, peer_to_base(p));
+ unlink_from_pool(p, peer_to_base(p), stack);
return 0;
}
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
- cleanup_once(0);
+ cleanup_once(0, stack);
return p;
}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
{
unsigned long now = jiffies;
int ttl, total;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
total = compute_total();
if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
total / inet_peer_threshold * HZ;
- while (!cleanup_once(ttl)) {
+ while (!cleanup_once(ttl, stack)) {
if (jiffies != now)
break;
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a1151b8adf3..b1d282f11be 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg)
if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
struct sk_buff *head = qp->q.fragments;
+ const struct iphdr *iph;
+ int err;
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out_rcu_unlock;
+ /* skb dst is stale, drop it, and perform route lookup again */
+ skb_dst_drop(head);
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (err)
+ goto out_rcu_unlock;
+
/*
- * Only search router table for the head fragment,
- * when defraging timeout at PRE_ROUTING HOOK.
+ * Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
*/
- if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
- const struct iphdr *iph = ip_hdr(head);
- int err = ip_route_input(head, iph->daddr, iph->saddr,
- iph->tos, head->dev);
- if (unlikely(err))
- goto out_rcu_unlock;
-
- /*
- * Only an end host needs to send an ICMP
- * "Fragment Reassembly Timeout" message, per RFC792.
- */
- if (skb_rtable(head)->rt_type != RTN_LOCAL)
- goto out_rcu_unlock;
+ if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
+ skb_rtable(head)->rt_type != RTN_LOCAL)
+ goto out_rcu_unlock;
- }
/* Send an ICMP "Fragment Reassembly Timeout" message. */
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442..2391b24e825 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
pp_ptr = optptr + 2;
goto error;
}
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
goto error;
}
opt->ts = optptr - iph;
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
unsigned long orefdst;
int err;
- if (!opt->srr)
+ if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67f241b9764..459c011b1d4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -603,7 +603,7 @@ slow_path:
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
- /* IF: we are not sending upto and including the packet end
+ /* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < left) {
len &= ~7;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2b097752426..cbff2ecccf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1444,7 +1444,7 @@ static int __init ip_auto_config(void)
root_server_addr = addr;
/*
- * Use defaults whereever applicable.
+ * Use defaults wherever applicable.
*/
if (ic_defaults() < 0)
return -1;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4b5d457c2d7..89bc7e66d59 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -76,7 +76,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
}
/*
- * Unfortunatly, _b and _mask are not aligned to an int (or long int)
+ * Unfortunately, _b and _mask are not aligned to an int (or long int)
* Some arches dont care, unrolling the loop is a win on them.
* For other arches, we only have a 16bit alignement.
*/
@@ -1874,7 +1874,7 @@ static int __init arp_tables_init(void)
if (ret < 0)
goto err1;
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
if (ret < 0)
goto err2;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ffcea0d1678..70491502800 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2233,7 +2233,7 @@ static int __init ip_tables_init(void)
if (ret < 0)
goto err1;
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
if (ret < 0)
goto err2;
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 21bcf471b25..9c71b2755ce 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -521,7 +521,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
}
EXPORT_SYMBOL(nf_nat_protocol_register);
-/* Noone stores the protocol anywhere; simply delete it. */
+/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
spin_lock_bh(&nf_nat_lock);
@@ -532,7 +532,7 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
-/* Noone using conntrack by the time this called. */
+/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2d3c72e5bbb..bceaec42c37 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -622,7 +622,7 @@ do_confirm:
static void raw_close(struct sock *sk, long timeout)
{
/*
- * Raw sockets may have direct kernel refereneces. Kill them.
+ * Raw sockets may have direct kernel references. Kill them.
*/
ip_ra_control(sk, 0, NULL);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4b0c8118080..99e6e4bb1c7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -821,7 +821,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
}
/*
- * Pertubation of rt_genid by a small quantity [1..256]
+ * Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
* many times (2^24) without giving recent rt_genid.
* Jenkins hash is strong enough that litle changes of rt_genid are OK.
@@ -1191,7 +1191,7 @@ restart:
#endif
/*
* Since lookup is lockfree, we must make sure
- * previous writes to rt are comitted to memory
+ * previous writes to rt are committed to memory
* before making rt visible to other CPUS.
*/
rcu_assign_pointer(rt_hash_table[hash].chain, rt);
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = init_net.loopback_dev;
dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_key_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
+ rth->rt_route_iif = in_dev->dev->ifindex;
rth->rt_iif = in_dev->dev->ifindex;
rth->dst.dev = (out_dev)->dev;
dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = net->loopback_dev;
dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_mark = oldflp4->flowi4_mark;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
- rth->rt_iif = 0;
+ rth->rt_route_iif = 0;
+ rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
/* get references to the devices that are to be hold by the routing
cache entry */
rth->dst.dev = dev_out;
@@ -2686,6 +2690,12 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
+static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
+ unsigned long old)
+{
+ return NULL;
+}
+
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
@@ -2694,6 +2704,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.default_mtu = ipv4_blackhole_default_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
+ .cow_metrics = ipv4_rt_blackhole_cow_metrics,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
@@ -2716,6 +2727,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_key_dst = ort->rt_key_dst;
rt->rt_key_src = ort->rt_key_src;
rt->rt_tos = ort->rt_tos;
+ rt->rt_route_iif = ort->rt_route_iif;
rt->rt_iif = ort->rt_iif;
rt->rt_oif = ort->rt_oif;
rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2737,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_type = ort->rt_type;
rt->rt_dst = ort->rt_dst;
rt->rt_src = ort->rt_src;
- rt->rt_iif = ort->rt_iif;
rt->rt_gateway = ort->rt_gateway;
rt->rt_spec_dst = ort->rt_spec_dst;
rt->peer = ort->peer;
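
The route.c hunks do two things: input and blackhole routes now carry the ingress interface in a separate rt_route_iif field next to rt_iif, and the blackhole dst_ops gains a cow_metrics hook that refuses to allocate writable metrics (the IPv6 blackhole ops get the same hook further down). A simplified sketch of the no-op cow_metrics pattern, with stand-in types rather than the kernel's:

struct dst_entry;    /* opaque stand-in */

struct dst_ops_sketch {
    unsigned int *(*cow_metrics)(struct dst_entry *dst, unsigned long old);
};

/* A blackhole route never needs writable metrics, so copy-on-write is
 * simply refused and callers keep the shared read-only defaults. */
static unsigned int *blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
    (void)dst;
    (void)old;
    return NULL;
}

static struct dst_ops_sketch blackhole_ops_sketch = {
    .cow_metrics = blackhole_cow_metrics,
};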
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086..321e6e84dbc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
-#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
.data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
-
-#endif
{
.procname = "igmp_max_msf",
.data = &sysctl_igmp_max_msf,
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 34340c9c95f..f376b05cca8 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,6 +93,7 @@ struct bictcp {
u32 ack_cnt; /* number of acks */
u32 tcp_cwnd; /* estimated tcp cwnd */
#define ACK_RATIO_SHIFT 4
+#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
u8 sample_cnt; /* number of samples to decide curr_rtt */
u8 found; /* the exit point is found? */
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
u32 delay;
if (icsk->icsk_ca_state == TCP_CA_Open) {
- cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
- ca->delayed_ack += cnt;
+ u32 ratio = ca->delayed_ack;
+
+ ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
+ ratio += cnt;
+
+ ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
}
/* Some calls are for duplicates without timetamps */
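
The CUBIC hunk bounds the delayed-ACK ratio estimator at 32 packets per ACK (32 << ACK_RATIO_SHIFT), so a burst of stretch ACKs can no longer inflate it without limit. A standalone sketch of the clamped exponentially weighted update, mirroring the patch:

#include <stdint.h>

#define ACK_RATIO_SHIFT 4
#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)

/* delayed_ack is a fixed-point (<<4) estimate of packets per ACK;
 * cnt is the number of segments newly acknowledged by this ACK. */
static uint32_t update_delayed_ack(uint32_t delayed_ack, uint32_t cnt)
{
    uint32_t ratio = delayed_ack;

    ratio -= delayed_ack >> ACK_RATIO_SHIFT;   /* decay the old estimate by 1/16 */
    ratio += cnt;                              /* mix in the new sample */

    return ratio < ACK_RATIO_LIMIT ? ratio : ACK_RATIO_LIMIT;
}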
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 656d431c99a..72f7218b03f 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -12,7 +12,7 @@
* within cong_avoid.
* o Error correcting in remote HZ, therefore remote HZ will be keeped
* on checking and updating.
- * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, sicne
+ * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since
* OWD have a similar meaning as RTT. Also correct the buggy formular.
* o Handle reaction for Early Congestion Indication (ECI) within
* pkts_acked, as mentioned within pseudo code.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dfa5beb0c1c..17388c7f49c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -73,7 +73,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
tcp_advance_send_head(sk, skb);
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
- /* Don't override Nagle indefinately with F-RTO */
+ /* Don't override Nagle indefinitely with F-RTO */
if (tp->frto_counter == 2)
tp->frto_counter = 3;
@@ -1003,7 +1003,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
int nlen;
u8 flags;
- BUG_ON(len > skb->len);
+ if (WARN_ON(len > skb->len))
+ return -EINVAL;
nsize = skb_headlen(skb) - len;
if (nsize < 0)
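
Replacing BUG_ON with WARN_ON plus an -EINVAL return turns a malformed fragmentation request into a recoverable error instead of a panic. A rough userspace analogue of that "warn and fail" style (the message text is illustrative):

#include <errno.h>
#include <stdio.h>

static int fragment_len_check(unsigned int len, unsigned int skb_len)
{
    if (len > skb_len) {
        fprintf(stderr, "tcp_fragment sketch: len %u > skb len %u\n",
                len, skb_len);
        return -EINVAL;    /* report and refuse rather than crash */
    }
    return 0;
}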
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index dc7f43179c9..05c3b6f0e8e 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -20,7 +20,7 @@
#define TCP_YEAH_DELTA 3 //log minimum fraction of cwnd to be removed on loss
#define TCP_YEAH_EPSILON 1 //log maximum fraction to be removed on early decongestion
#define TCP_YEAH_PHY 8 //lin maximum delta from base
-#define TCP_YEAH_RHO 16 //lin minumum number of consecutive rtt to consider competition on loss
+#define TCP_YEAH_RHO 16 //lin minimum number of consecutive rtt to consider competition on loss
#define TCP_YEAH_ZETA 50 //lin minimum number of state switchs to reset reno_count
#define TCP_SCALABLE_AI_CNT 100U
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 588f47af5fa..f87a8eb76f3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -189,7 +189,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
* @sk: socket struct in question
* @snum: port number to look up
* @saddr_comp: AF-dependent comparison of bound local IP addresses
- * @hash2_nulladdr: AF-dependant hash value in secondary hash chains,
+ * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 571aa96a175..2d51840e53a 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm4_prepare_output);
-static int xfrm4_output_finish(struct sk_buff *skb)
+int xfrm4_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
if (!skb_dst(skb)->xfrm) {
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb)
int xfrm4_output(struct sk_buff *skb)
{
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
+
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
- NULL, skb_dst(skb)->dev, xfrm4_output_finish,
+ NULL, dst->dev,
+ x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
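
xfrm4_output_finish is made non-static and the POST_ROUTING hook now calls through x->outer_mode->afinfo->output_finish, so the finish step is selected per address family; the xfrm4_state.c and xfrm6 hunks below register the IPv4 and IPv6 callbacks. A rough sketch of that dispatch-through-afinfo shape, with made-up type and function names:

struct sk_buff;    /* opaque stand-in */

struct xfrm_afinfo_sketch {
    int (*output_finish)(struct sk_buff *skb);
};

static int v4_output_finish_sketch(struct sk_buff *skb)
{
    (void)skb;
    return 0;    /* the real hook re-enters the output path */
}

static struct xfrm_afinfo_sketch xfrm4_afinfo_sketch = {
    .output_finish = v4_output_finish_sketch,
};

/* Generic output: call whichever finish routine the state's address
 * family registered, instead of a hard-coded function. */
static int xfrm_output_sketch(struct xfrm_afinfo_sketch *afinfo,
                              struct sk_buff *skb)
{
    return afinfo->output_finish(skb);
}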
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659f..d20a05e970d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
rt->rt_key_dst = fl4->daddr;
rt->rt_key_src = fl4->saddr;
rt->rt_tos = fl4->flowi4_tos;
+ rt->rt_route_iif = fl4->flowi4_iif;
rt->rt_iif = fl4->flowi4_iif;
rt->rt_oif = fl4->flowi4_oif;
rt->rt_mark = fl4->flowi4_mark;
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1717c64628d..805d63ef434 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.init_tempsel = __xfrm4_init_tempsel,
.init_temprop = xfrm4_init_temprop,
.output = xfrm4_output,
+ .output_finish = xfrm4_output_finish,
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3daaf3c7703..a7bda075705 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1084,7 +1084,7 @@ static int ipv6_get_saddr_eval(struct net *net,
case IPV6_SADDR_RULE_PRIVACY:
{
/* Rule 7: Prefer public address
- * Note: prefer temprary address if use_tempaddr >= 2
+ * Note: prefer temporary address if use_tempaddr >= 2
*/
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
@@ -1968,7 +1968,7 @@ ok:
* to the stored lifetime since we'll
* be updating the timestamp below,
* else we'll set it back to the
- * minumum.
+ * minimum.
*/
if (prefered_lft != ifp->prefered_lft) {
valid_lft = stored_lft;
@@ -4537,7 +4537,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
t = p->sysctl;
p->sysctl = NULL;
- unregister_sysctl_table(t->sysctl_header);
+ unregister_net_sysctl_table(t->sysctl_header);
kfree(t->dev_name);
kfree(t);
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 4b13d5d8890..afcc7099f96 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1113,7 +1113,7 @@ static int __init inet6_init(void)
/*
* ipngwg API draft makes clear that the correct semantics
* for TCP and UDP is to consider one TCP and UDP instance
- * in a host availiable by both INET and INET6 APIs and
+ * in a host available by both INET and INET6 APIs and
* able to communicate via both network protocols.
*/
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5aa8ec88f19..59dccfbb5b1 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -371,7 +371,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
- sg = asg + 1;
+ sg = asg + sglists;
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 16605465046..f2c5b0fc0f2 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
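
The IPv6 bind-conflict test is narrowed: when both sockets set SO_REUSEADDR, only an existing socket actually in LISTEN state now blocks the bind, where previously LISTEN or CLOSE did. A simplified sketch of the predicate (address comparison omitted):

#include <stdbool.h>

enum sock_state_sketch { SK_LISTEN, SK_ESTABLISHED, SK_CLOSE };

static bool bind_conflict_sketch(bool new_reuse, bool old_reuse,
                                 enum sock_state_sketch old_state)
{
    if (!new_reuse || !old_reuse)
        return true;                  /* no reuse on one side: always a conflict */
    return old_state == SK_LISTEN;    /* with reuse, only a listener conflicts */
}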
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 18208876aa8..46cf7bea676 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -779,7 +779,7 @@ slow_path:
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
- /* IF: we are not sending upto and including the packet end
+ /* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < left) {
len &= ~7;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0b2af9b85ce..5a1c6f27ffa 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -2248,7 +2248,7 @@ static int __init ip6_tables_init(void)
if (ret < 0)
goto err1;
- /* Noone else will be downing sem now, so we won't sleep */
+ /* No one else will be downing sem now, so we won't sleep */
ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
if (ret < 0)
goto err2;
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 28e74488a32..a5a4c5dd539 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
int tcphoff, needs_ack;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
+#define DEFAULT_TOS_VALUE 0x0U
+ const __u8 tclass = DEFAULT_TOS_VALUE;
struct dst_entry *dst = NULL;
u8 proto;
struct flowi6 fl6;
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
skb_put(nskb, sizeof(struct ipv6hdr));
skb_reset_network_header(nskb);
ip6h = ipv6_hdr(nskb);
- ip6h->version = 6;
+ *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
ip6h->hop_limit = ip6_dst_hoplimit(dst);
ip6h->nexthdr = IPPROTO_TCP;
ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
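
Instead of setting only the version nibble, the TCP reset path now writes the whole first 32-bit word of the IPv6 header, packing version, traffic class and a zero flow label in one store. A small self-contained illustration of that packing:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* First word of an IPv6 header: version (4 bits), traffic class (8 bits),
 * flow label (20 bits), stored in network byte order. */
static uint32_t ipv6_word0(uint8_t tclass, uint32_t flowlabel)
{
    return htonl(0x60000000u
                 | ((uint32_t)tclass << 20)
                 | (flowlabel & 0xFFFFFu));
}

int main(void)
{
    printf("%08x\n", (unsigned)ntohl(ipv6_word0(0, 0)));   /* prints 60000000 */
    return 0;
}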
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 97c5b21b967..cdd6d045e42 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -71,7 +71,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (reasm == NULL)
return NF_STOLEN;
- /* error occured or not fragmented */
+ /* error occurred or not fragmented */
if (reasm == skb)
return NF_ACCEPT;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 843406f14d7..fd0eec6f88c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -153,6 +153,12 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
+static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
+ unsigned long old)
+{
+ return NULL;
+}
+
static struct dst_ops ip6_dst_blackhole_ops = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -161,6 +167,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.default_mtu = ip6_blackhole_default_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
+ .cow_metrics = ip6_rt_blackhole_cow_metrics,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
@@ -2012,7 +2019,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->dst.output = ip6_output;
rt->rt6i_dev = net->loopback_dev;
rt->rt6i_idev = idev;
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2b0c186862c..4f49e5dd41b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -503,6 +503,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
+ dst = NULL;
goto done;
}
skb = tcp_make_synack(sk, dst, req, rvp);
@@ -1621,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1648,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
__kfree_skb(opt_skb);
return 0;
}
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
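
Two fixes in tcp_ipv6.c: the SYN-ACK path clears its local dst pointer after extracting the errno from an ERR_PTR so the shared cleanup never releases a bogus pointer, and the receive path saves the RPS rxhash whether or not the connection is established. A userspace sketch of the first pattern, with simplified IS_ERR/PTR_ERR macros and an illustrative release stub:

#include <errno.h>
#include <stddef.h>

#define MAX_ERRNO 4095UL
#define IS_ERR(p)  ((unsigned long)(p) >= -MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

struct dst_entry;

static struct dst_entry *lookup_flow_sketch(void)
{
    return (struct dst_entry *)(long)-EHOSTUNREACH;   /* pretend the lookup failed */
}

static void dst_release_sketch(struct dst_entry *dst)
{
    (void)dst;
}

static int send_synack_sketch(void)
{
    struct dst_entry *dst;
    int err = 0;

    dst = lookup_flow_sketch();
    if (IS_ERR(dst)) {
        err = (int)PTR_ERR(dst);
        dst = NULL;      /* keep the cleanup path from releasing an error pointer */
        goto done;
    }
    /* ... build and transmit the SYN-ACK here ... */
done:
    if (dst)
        dst_release_sketch(dst);
    return err;
}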
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e1..9e305d74b3d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
int rc;
int is_udplite = IS_UDPLITE(sk);
+ if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
@@ -1332,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
- if ((skb_headroom(skb) < frag_hdr_sz) &&
+ if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
goto out;
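
The UFO fragmentation path now checks directly whether the fragment header fits between the start of the buffer and the MAC header, rather than going through skb_headroom(). A hedged sketch of that test with plain pointers:

#include <stdbool.h>
#include <stddef.h>

/* True when the head must be expanded: the fragment header does not fit
 * in front of the outermost (MAC) header. */
static bool need_head_expand(const unsigned char *head,
                             const unsigned char *mac_header,
                             size_t frag_hdr_sz)
{
    return mac_header < head + frag_hdr_sz;
}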
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8e688b3de9a..49a91c5f562 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm6_prepare_output);
-static int xfrm6_output_finish(struct sk_buff *skb)
+int xfrm6_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb)
if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
- return ip6_fragment(skb, xfrm6_output_finish);
+ return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
}
- return xfrm6_output_finish(skb);
+ return x->outer_mode->afinfo->output_finish(skb);
}
int xfrm6_output(struct sk_buff *skb)
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index afe941e9415..248f0b2a7ee 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
.output = xfrm6_output,
+ .output_finish = xfrm6_output_finish,
.extract_input = xfrm6_extract_input,
.extract_output = xfrm6_extract_output,
.transport_finish = xfrm6_transport_finish,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4..cc616974a44 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
MSG_NOSIGNAL)) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
lock_sock(sk);
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 783c5f367d2..005b424494a 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -165,7 +165,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
irlap_apply_default_connection_parameters(self);
- self->N3 = 3; /* # connections attemts to try before giving up */
+ self->N3 = 3; /* # connections attempts to try before giving up */
self->state = LAP_NDM;
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index d434c888074..bb47021c9a5 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -708,7 +708,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
self->frame_sent = TRUE;
}
- /* Readjust our timer to accomodate devices
+ /* Readjust our timer to accommodate devices
* doing faster or slower discovery than us...
* Jean II */
irlap_start_query_timer(self, info->S, info->s);
@@ -931,7 +931,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_send_rr_frame(self, CMD_FRAME);
/* The timer is set to half the normal timer to quickly
- * detect a failure to negociate the new connection
+ * detect a failure to negotiate the new connection
* parameters. IrLAP 6.11.3.2, note 3.
* Note that currently we don't process this failure
* properly, as we should do a quick disconnect.
@@ -1052,7 +1052,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
return -EPROTO;
}
- /* Substract space used by this skb */
+ /* Subtract space used by this skb */
self->bytes_left -= skb->len;
#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
/* Window has been adjusted for the max packet
@@ -1808,7 +1808,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
return -EPROTO; /* Try again later */
}
- /* Substract space used by this skb */
+ /* Subtract space used by this skb */
self->bytes_left -= skb->len;
#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
/* Window has been adjusted for the max packet
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 688222cbf55..8c004161a84 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -848,7 +848,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
* though IrLAP is currently sending the *last* frame of the
* tx-window, the driver most likely has only just started
* sending the *first* frame of the same tx-window.
- * I.e. we are always at the very begining of or Tx window.
+ * I.e. we are always at the very beginning of or Tx window.
* Now, we are supposed to set the final timer from the end
* of our tx-window to let the other peer reply. So, we need
* to add extra time to compensate for the fact that we
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index c1fb5db8104..9505a7d06f1 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -498,7 +498,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
switch (event) {
#ifdef CONFIG_IRDA_ULTRA
case LM_UDATA_INDICATION:
- /* This is most bizzare. Those packets are aka unreliable
+ /* This is most bizarre. Those packets are aka unreliable
* connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA.
* Why do we pass them as Ultra ??? Jean II */
irlmp_connless_data_indication(self, skb);
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 0d82ff5aeff..979ecb2435a 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -73,7 +73,7 @@
* Infinite thanks to those brave souls for providing the infrastructure
* upon which IrNET is built.
*
- * Thanks to all my collegues in HP for helping me. In particular,
+ * Thanks to all my colleagues in HP for helping me. In particular,
* thanks to Salil Pradhan and Bill Serra for W2k testing...
* Thanks to Luiz Magalhaes for irnetd and much testing...
*
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 849aaf0dabb..9715e6e5900 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -40,7 +40,7 @@
* o the hash function for ints is pathetic (but could be changed)
* o locking is sometime suspicious (especially during enumeration)
* o most users have only a few elements (== overhead)
- * o most users never use seach, so don't benefit from hashing
+ * o most users never use search, so don't benefit from hashing
* Problem already fixed :
* o not 64 bit compliant (most users do hashv = (int) self)
* o hashbin_remove() is broken => use hashbin_remove_this()
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index f6054f9ccbe..9d9af460697 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1193,7 +1193,7 @@ EXPORT_SYMBOL(irttp_connect_request);
/*
* Function irttp_connect_confirm (handle, qos, skb)
*
- * Sevice user confirms TSAP connection with peer.
+ * Service user confirms TSAP connection with peer.
*
*/
static void irttp_connect_confirm(void *instance, void *sap,
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 2b00974e5ba..1b51bcf4239 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -39,16 +39,16 @@
#include <net/irda/irlap_frame.h>
/*
- * Maximum values of the baud rate we negociate with the other end.
+ * Maximum values of the baud rate we negotiate with the other end.
* Most often, you don't have to change that, because Linux-IrDA will
* use the maximum offered by the link layer, which usually works fine.
* In some very rare cases, you may want to limit it to lower speeds...
*/
int sysctl_max_baud_rate = 16000000;
/*
- * Maximum value of the lap disconnect timer we negociate with the other end.
+ * Maximum value of the lap disconnect timer we negotiate with the other end.
* Most often, the value below represent the best compromise, but some user
- * may want to keep the LAP alive longuer or shorter in case of link failure.
+ * may want to keep the LAP alive longer or shorter in case of link failure.
* Remember that the threshold time (early warning) is fixed to 3s...
*/
int sysctl_max_noreply_time = 12;
@@ -411,7 +411,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
* Fix tx data size according to user limits - Jean II
*/
if (qos->data_size.value > sysctl_max_tx_data_size)
- /* Allow non discrete adjustement to avoid loosing capacity */
+ /* Allow non discrete adjustement to avoid losing capacity */
qos->data_size.value = sysctl_max_tx_data_size;
/*
* Override Tx window if user request it. - Jean II
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 0335ba0cc59..f418cb2ad49 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -59,7 +59,7 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
* slot time, plus add some extra time to properly receive the last
* discovery packet (which is longer due to extra discovery info),
* to avoid messing with for incomming connections requests and
- * to accomodate devices that perform discovery slower than us.
+ * to accommodate devices that perform discovery slower than us.
* Jean II */
timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
+ XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9637e45744f..986b2a5e876 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -250,7 +250,7 @@ static struct device *af_iucv_dev;
* PRMDATA[0..6] socket data (max 7 bytes);
* PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
*
- * The socket data length is computed by substracting the socket data length
+ * The socket data length is computed by subtracting the socket data length
* value from 0xFF.
* If the socket data len is greater 7, then PRMDATA can be used for special
* notifications (see iucv_sock_shutdown); and further,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 1ee5dab3cfa..8f156bd86be 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -735,7 +735,7 @@ static void iucv_cleanup_queue(void)
struct iucv_irq_list *p, *n;
/*
- * When a path is severed, the pathid can be reused immediatly
+ * When a path is severed, the pathid can be reused immediately
* on a iucv connect or a connection pending interrupt. Remove
* all entries from the task queue that refer to a stale pathid
* (iucv_path_table[ix] == NULL). Only then do the iucv connect
@@ -807,7 +807,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
spin_lock_bh(&iucv_table_lock);
/* Remove handler from the iucv_handler_list. */
list_del_init(&handler->list);
- /* Sever all pathids still refering to the handler. */
+ /* Sever all pathids still referring to the handler. */
list_for_each_entry_safe(p, n, &handler->paths, list) {
iucv_sever_pathid(p->pathid, NULL);
iucv_path_table[p->pathid] = NULL;
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a912..90324211131 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
s32 data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
- ((skb_tail_pointer(skb) -
- (u8 *)pdu) - llc_len) < data_size)
+ !pskb_may_pull(skb, data_size))
return 0;
if (unlikely(pskb_trim_rcsum(skb, data_size)))
return 0;
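
Rather than hand-computing how many bytes sit between the PDU and the tail pointer, llc_fixup_skb() now asks pskb_may_pull() whether the length claimed in the header is actually available before trimming to it. A simplified sketch of the validation:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* data_size is the PDU length field minus the LLC header length; it must
 * be non-negative and fully backed by bytes present in the buffer. */
static bool llc_payload_len_ok(int32_t data_size, size_t bytes_available)
{
    if (data_size < 0)
        return false;
    return (size_t)data_size <= bytes_available;
}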
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 334213571ad..44049733c4e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1504,6 +1504,8 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode old_req;
int err;
+ lockdep_assert_held(&sdata->u.mgd.mtx);
+
old_req = sdata->u.mgd.req_smps;
sdata->u.mgd.req_smps = smps_mode;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index dacace6b139..9ea7c0d0103 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -177,9 +177,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- mutex_lock(&local->iflist_mtx);
+ mutex_lock(&sdata->u.mgd.mtx);
err = __ieee80211_request_smps(sdata, smps_mode);
- mutex_unlock(&local->iflist_mtx);
+ mutex_unlock(&sdata->u.mgd.mtx);
return err;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a4040170142..c18396c248d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -97,7 +97,7 @@ struct ieee80211_bss {
size_t supp_rates_len;
/*
- * During assocation, we save an ERP value from a probe response so
+ * During association, we save an ERP value from a probe response so
* that we can feed ERP info to the driver when handling the
* association completes. these fields probably won't be up-to-date
* otherwise, you probably don't want to use them.
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 8d65b47d983..336ca9d0c5c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -628,7 +628,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
*
* @mpath: mesh path whose queue has to be freed
*
- * Locking: the function must me called withing a rcu_read_lock region
+ * Locking: the function must me called within a rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index dbdebeda097..c06aa3ac6b9 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -259,7 +259,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
}
}
- /* try to sample up to half of the availble rates during each interval */
+ /* try to sample up to half of the available rates during each interval */
mi->sample_count *= 4;
cur_prob = 0;
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 6510f8ee738..19111c7bf45 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -77,7 +77,7 @@ union rc_pid_event_data {
};
struct rc_pid_event {
- /* The time when the event occured */
+ /* The time when the event occurred */
unsigned long timestamp;
/* Event ID number */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index aa5cc37b492..c5d4530d828 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -381,7 +381,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
* specs were sane enough this time around to require padding each A-MSDU
* subframe to a length that is a multiple of four.
*
- * Padding like Atheros hardware adds which is inbetween the 802.11 header and
+ * Padding like Atheros hardware adds which is between the 802.11 header and
* the payload is not supported, the driver is required to move the 802.11
* header to be directly in front of the payload in that case.
*/
@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
* same TID from the same station
*/
rx->skb = skb;
- rx->flags = 0;
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
.sdata = sta->sdata,
.local = sta->local,
.queue = tid,
+ .flags = 0,
};
struct tid_ampdu_rx *tid_agg_rx;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d0311a322dd..13e8c30adf0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -47,9 +47,9 @@
* Station entries are added by mac80211 when you establish a link with a
* peer. This means different things for the different type of interfaces
* we support. For a regular station this mean we add the AP sta when we
- * receive an assocation response from the AP. For IBSS this occurs when
+ * receive an association response from the AP. For IBSS this occurs when
* get to know about a peer on the same IBSS. For WDS we add the sta for
- * the peer imediately upon device open. When using AP mode we add stations
+ * the peer immediately upon device open. When using AP mode we add stations
* for each respective station upon request from userspace through nl80211.
*
* In order to remove a STA info structure, various sta_info_destroy_*()
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 57681149e37..b2f95966c7f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -173,7 +173,7 @@ struct sta_ampdu_mlme {
/**
* enum plink_state - state of a mesh peer link finite state machine
*
- * @PLINK_LISTEN: initial state, considered the implicit state of non existant
+ * @PLINK_LISTEN: initial state, considered the implicit state of non existent
* mesh peer links
* @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
* @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce4596ed126..bd1224fd216 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
&local->dynamic_ps_disable_work);
}
+ /* Don't restart the timer if we're not disassociated */
+ if (!ifmgd->associated)
+ return TX_CONTINUE;
+
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
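
The dynamic power-save timer is only re-armed while the interface is associated; otherwise the handler returns early and leaves the timer alone. A small sketch of that guard:

#include <stdbool.h>

struct dyn_ps_sketch {
    bool associated;
    unsigned long timer_expires;
};

/* Returns 1 if the timer was re-armed, 0 if we bailed out early. */
static int maybe_rearm_dynamic_ps(struct dyn_ps_sketch *ps,
                                  unsigned long now, unsigned long timeout)
{
    if (!ps->associated)
        return 0;
    ps->timer_expires = now + timeout;
    return 1;
}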
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d87e03bc8ef..72d1ac611fd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -918,7 +918,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
to = ip_set_list[to_id];
/* Features must not change.
- * Not an artifical restriction anymore, as we must prevent
+ * Not an artificial restriction anymore, as we must prevent
* possible loops created by swapping in setlist type of sets. */
if (!(from->type->features == to->type->features &&
from->type->family == to->type->family))
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 51f3af7c474..059af3120be 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -572,7 +572,7 @@ static const struct file_operations ip_vs_app_fops = {
.open = ip_vs_app_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
#endif
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 5092505f27a..bf28ac2fc99 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -595,7 +595,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
atomic_inc(&dest->inactconns);
} else {
/* It is a persistent connection/template, so increase
- the peristent connection counter */
+ the persistent connection counter */
atomic_inc(&dest->persistconns);
}
@@ -657,7 +657,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
}
} else {
/* It is a persistent connection/template, so decrease
- the peristent connection counter */
+ the persistent connection counter */
atomic_dec(&dest->persistconns);
}
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = {
.open = ip_vs_conn_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
static const char *ip_vs_origin_name(unsigned flags)
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
.open = ip_vs_conn_sync_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
#endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ea722810faf..37890f228b1 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2066,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = {
.open = ip_vs_info_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
#endif
@@ -2109,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = {
.open = ip_vs_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = single_release_net,
};
static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
@@ -2178,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = {
.open = ip_vs_stats_percpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = single_release_net,
};
#endif
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index f276df9896b..87e40ea77a9 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -131,7 +131,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
list_del(&en->list);
/*
- * We don't kfree dest because it is refered either by its service
+ * We don't kfree dest because it is referred either by its service
* or the trash dest list.
*/
atomic_dec(&en->dest->refcnt);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index cb1c9913d38..90f618ab6dd 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -152,7 +152,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
write_lock(&set->lock);
list_for_each_entry_safe(e, ep, &set->list, list) {
/*
- * We don't kfree dest because it is refered either
+ * We don't kfree dest because it is referred either
* by its service or by the trash dest list.
*/
atomic_dec(&e->dest->refcnt);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index b027ccc49f4..d12ed53ec95 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -566,7 +566,7 @@ static struct ipvs_sctp_nextstate
* SHUTDOWN sent from the client, waitinf for SHUT ACK from the server
*/
/*
- * We recieved the data chuck, keep the state unchanged. I assume
+ * We received the data chuck, keep the state unchanged. I assume
* that still data chuncks can be received by both the peers in
* SHUDOWN state
*/
@@ -633,7 +633,7 @@ static struct ipvs_sctp_nextstate
* SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client
*/
/*
- * We recieved the data chuck, keep the state unchanged. I assume
+ * We received the data chuck, keep the state unchanged. I assume
* that still data chuncks can be received by both the peers in
* SHUDOWN state
*/
@@ -701,7 +701,7 @@ static struct ipvs_sctp_nextstate
* SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server
*/
/*
- * We recieved the data chuck, keep the state unchanged. I assume
+ * We received the data chuck, keep the state unchanged. I assume
* that still data chuncks can be received by both the peers in
* SHUDOWN state
*/
@@ -771,7 +771,7 @@ static struct ipvs_sctp_nextstate
* SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client
*/
/*
- * We recieved the data chuck, keep the state unchanged. I assume
+ * We received the data chuck, keep the state unchanged. I assume
* that still data chuncks can be received by both the peers in
* SHUDOWN state
*/
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 941286ca911..2e1c11f7841 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,7 +453,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
REJECT will give spurious warnings here. */
/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
- /* No external references means noone else could have
+ /* No external references means no one else could have
confirmed us. */
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Confirming conntrack %p\n", ct);
@@ -901,7 +901,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
&dataoff, &protonum);
if (ret <= 0) {
- pr_debug("not prepared to track yet or error occured\n");
+ pr_debug("not prepared to track yet or error occurred\n");
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
ret = -ret;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 9ae57c57c50..2e664a69d7d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -98,7 +98,7 @@ static const char * const dccp_state_names[] = {
#define sIV CT_DCCP_INVALID
/*
- * DCCP state transistion table
+ * DCCP state transition table
*
* The assumption is the same as for TCP tracking:
*
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 6f4ee70f460..6772b115465 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -107,9 +107,9 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */
+/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
},
{
@@ -121,7 +121,7 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */
+/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c05c0dc3349..cb5a2858178 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -707,7 +707,7 @@ static const char *ct_sdp_header_search(const char *dptr, const char *limit,
}
/* Locate a SDP header (optionally a substring within the header value),
- * optionally stopping at the first occurence of the term header, parse
+ * optionally stopping at the first occurrence of the term header, parse
* it and return the offset and length of the data we're interested in.
*/
int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5ab22e2bbd7..5b466cd1272 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -134,7 +134,7 @@ static int __nf_queue(struct sk_buff *skb,
const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh;
- /* QUEUE == DROP if noone is waiting, to be safe. */
+ /* QUEUE == DROP if no one is waiting, to be safe. */
rcu_read_lock();
qh = rcu_dereference(queue_handler[pf]);
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 0a229191e55..ae8271652ef 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
u_int8_t orig, nv;
orig = ipv6_get_dsfield(iph);
- nv = (orig & info->tos_mask) ^ info->tos_value;
+ nv = (orig & ~info->tos_mask) ^ info->tos_value;
if (orig != nv) {
if (!skb_make_writable(skb, sizeof(struct iphdr)))
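
The xt_DSCP fix clears the bits covered by the mask (& ~mask) before XOR-ing in the new value, so stale TOS bits no longer leak through. A tiny self-contained illustration:

#include <stdint.h>
#include <stdio.h>

static uint8_t rewrite_tos(uint8_t orig, uint8_t mask, uint8_t value)
{
    return (uint8_t)((orig & ~mask) ^ value);   /* clear masked bits, then set */
}

int main(void)
{
    /* e.g. rewrite the low two (ECN) bits of 0x2e to 0x01 */
    printf("0x%02x\n", rewrite_tos(0x2e, 0x03, 0x01));   /* prints 0x2d */
    return 0;
}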
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 481a86fdc40..61805d7b38a 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
- if (strcmp(par->table, "raw") == 0) {
- pr_info("state is undetermined at the time of raw table\n");
- return -EINVAL;
- }
-
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d37b7f80fa3..de0d8e4cbfb 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -109,7 +109,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
*
* Description:
* This is the hashing function for the domain hash table, it returns the
- * correct bucket number for the domain. The caller is responsibile for
+ * correct bucket number for the domain. The caller is responsible for
* ensuring that the hash table is protected with either a RCU read lock or the
* hash table lock.
*
@@ -134,7 +134,7 @@ static u32 netlbl_domhsh_hash(const char *key)
*
* Description:
* Searches the domain hash table and returns a pointer to the hash table
- * entry if found, otherwise NULL is returned. The caller is responsibile for
+ * entry if found, otherwise NULL is returned. The caller is responsible for
* ensuring that the hash table is protected with either a RCU read lock or the
* hash table lock.
*
@@ -165,7 +165,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
* Searches the domain hash table and returns a pointer to the hash table
* entry if an exact match is found, if an exact match is not present in the
* hash table then the default entry is returned if valid otherwise NULL is
- * returned. The caller is responsibile ensuring that the hash table is
+ * returned. The caller is responsible ensuring that the hash table is
* protected with either a RCU read lock or the hash table lock.
*
*/
@@ -193,7 +193,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
*
* Description:
* Generate an audit record for adding a new NetLabel/LSM mapping entry with
- * the given information. Caller is responsibile for holding the necessary
+ * the given information. Caller is responsible for holding the necessary
* locks.
*
*/
@@ -605,7 +605,7 @@ int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)
*
* Description:
* Look through the domain hash table searching for an entry to match @domain,
- * return a pointer to a copy of the entry or NULL. The caller is responsibile
+ * return a pointer to a copy of the entry or NULL. The caller is responsible
* for ensuring that rcu_read_[un]lock() is called.
*
*/
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 998e85e895d..4f251b19fbc 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -259,7 +259,7 @@ add_failure:
*
* Description:
* This function is a helper function used by the LISTALL and LISTDEF command
- * handlers. The caller is responsibile for ensuring that the RCU read lock
+ * handlers. The caller is responsible for ensuring that the RCU read lock
* is held. Returns zero on success, negative values on failure.
*
*/
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index c47a511f203..7c4dce8fa5e 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -355,7 +355,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
*
* Conceptually, we have two counters:
* - send credits: this tells us how many WRs we're allowed
- * to submit without overruning the reciever's queue. For
+ * to submit without overruning the receiver's queue. For
* each SEND WR we post, we decrement this by one.
*
* - posted credits: this tells us how many WRs we recently
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 712cf2d1f28..3a60a15d1b4 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -181,7 +181,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
unsigned int send_size, recv_size;
int ret;
- /* The offset of 1 is to accomodate the additional ACK WR. */
+ /* The offset of 1 is to accommodate the additional ACK WR. */
send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
rds_iw_ring_resize(send_ring, send_size - 1);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 59509e9a9e7..6deaa77495e 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -122,7 +122,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
#else
/* FIXME - needs to compare the local and remote
* ipaddr/port tuple, but the ipaddr is the only
- * available infomation in the rds_sock (as the rest are
+ * available information in the rds_sock (as the rest are
* zero'ed. It doesn't appear to be properly populated
* during connection setup...
*/
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 6280ea020d4..545d8ee3efb 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -307,7 +307,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
*
* Conceptually, we have two counters:
* - send credits: this tells us how many WRs we're allowed
- * to submit without overruning the reciever's queue. For
+ * to submit without overruning the receiver's queue. For
* each SEND WR we post, we decrement this by one.
*
* - posted credits: this tells us how many WRs we recently
diff --git a/net/rds/send.c b/net/rds/send.c
index 35b9c2e9caf..d58ae5f9339 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -116,7 +116,7 @@ static void release_in_xmit(struct rds_connection *conn)
}
/*
- * We're making the concious trade-off here to only send one message
+ * We're making the conscious trade-off here to only send one message
* down the connection at a time.
* Pro:
* - tx queueing is a simple fifo list
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 08dcd2f29cd..479cae57d18 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -587,7 +587,7 @@ static int rose_clear_routes(void)
/*
* Check that the device given is a valid AX.25 interface that is "up".
- * called whith RTNL
+ * called with RTNL
*/
static struct net_device *rose_ax25_dev_find(char *devname)
{
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 15873e14cb5..14b42f4ad79 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -999,7 +999,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
switch (n->nlmsg_type) {
case RTM_NEWACTION:
/* we are going to assume all other flags
- * imply create only if it doesnt exist
+ * imply create only if it doesn't exist
* Note that CREATE | EXCL implies that
* but since we want avoid ambiguity (eg when flags
* is zero) then just set this
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 50c7c06c019..7affe9a9275 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -161,7 +161,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
}
if (offset > 0 && offset > skb->len) {
pr_info("tc filter pedit"
- " offset %d cant exceed pkt length %d\n",
+ " offset %d can't exceed pkt length %d\n",
offset, skb->len);
goto bad;
}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index a4de67eca82..49130e8abff 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -47,7 +47,7 @@
* on the meta type. Obviously, the length of the data must also
* be provided for non-numeric types.
*
- * Additionaly, type dependant modifiers such as shift operators
+ * Additionally, type dependent modifiers such as shift operators
* or mask may be applied to extend the functionaliy. As of now,
* the variable length type supports shifting the byte string to
* the right, eating up any number of octets and thus supporting
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index e1429a85091..29b942ce9e8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -183,7 +183,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
* filters in qdisc and in inner nodes (if higher filter points to the inner
* node). If we end up with classid MAJOR:0 we enqueue the skb into special
* internal fifo (direct). These packets then go directly thru. If we still
- * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
+ * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful
* then finish and return direct queue.
*/
#define HTB_DIRECT ((struct htb_class *)-1L)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index edbbf7ad662..69c35f6cd13 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -160,7 +160,7 @@ static bool loss_4state(struct netem_sched_data *q)
u32 rnd = net_random();
/*
- * Makes a comparision between rnd and the transition
+ * Makes a comparison between rnd and the transition
* probabilities outgoing from the current state, then decides the
* next state and if the next packet has to be transmitted or lost.
* The four states correspond to:
@@ -212,9 +212,9 @@ static bool loss_4state(struct netem_sched_data *q)
* Generates losses according to the Gilbert-Elliot loss model or
* its special cases (Gilbert or Simple Gilbert)
*
- * Makes a comparision between random number and the transition
+ * Makes a comparison between random number and the transition
* probabilities outgoing from the current state, then decides the
- * next state. A second random number is extracted and the comparision
+ * next state. A second random number is extracted and the comparison
* with the loss probability of the current state decides if the next
* packet will be transmitted or lost.
*/
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6b04287913c..1a21c571aa0 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
+ if (asoc->peer.retran_path == peer)
+ asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
if (t)
asoc->peer.retran_path = t;
+ else
+ t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
@@ -1593,7 +1597,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
struct sctp_chunk *ack;
struct sctp_chunk *tmp;
- /* We can remove all the entries from the queue upto
+ /* We can remove all the entries from the queue up to
* the "Peer-Sequence-Number".
*/
list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
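
The associola.c hunks make sure that when a peer transport is removed, the cached retransmit path is repointed at the replacement transport just like the active path and last-data-from pointers, and that updating the retransmit path falls back to the current one when no alternative is found. A simplified sketch of the first part, with stand-in types:

struct transport;    /* opaque stand-in */

struct assoc_paths_sketch {
    struct transport *active_path;
    struct transport *retran_path;
    struct transport *last_data_from;
};

/* Repoint every cached path pointer that still refers to the transport
 * being removed at its replacement. */
static void rm_peer_sketch(struct assoc_paths_sketch *p,
                           struct transport *gone,
                           struct transport *replacement)
{
    if (p->active_path == gone)
        p->active_path = replacement;
    if (p->retran_path == gone)
        p->retran_path = replacement;      /* the pointer the patch adds */
    if (p->last_data_from == gone)
        p->last_data_from = replacement;
}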
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index ddbbf7c81fa..865e68fef21 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -113,7 +113,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
return new;
}
-/* Free the shared key stucture */
+/* Free the shared key structure */
static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
{
BUG_ON(!list_empty(&sh_key->key_list));
@@ -122,7 +122,7 @@ static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
kfree(sh_key);
}
-/* Destory the entire key list. This is done during the
+/* Destroy the entire key list. This is done during the
* associon and endpoint free process.
*/
void sctp_auth_destroy_keys(struct list_head *keys)
@@ -324,7 +324,7 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
if (!peer_key_vector || !local_key_vector)
goto out;
- /* Figure out the order in wich the key_vectors will be
+ /* Figure out the order in which the key_vectors will be
* added to the endpoint shared key.
* SCTP-AUTH, Section 6.1:
* This is performed by selecting the numerically smaller key
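One simple reading of the ordering rule quoted above (RFC 4895, Section 6.1) is: treat each key vector as a big-endian unsigned integer, put the numerically smaller one first, and concatenate. The sketch below is a hypothetical, self-contained rendering of only that ordering step; the helper names are not the kernel's, and how the resulting secret is subsequently used is out of scope here.

#include <string.h>

/* Compare two byte vectors as big-endian unsigned integers.
 * Returns <0 if a < b, >0 if a > b, 0 if equal. */
static int compare_vectors(const unsigned char *a, size_t alen,
                           const unsigned char *b, size_t blen)
{
    /* Skip leading zero bytes so that length alone does not decide. */
    while (alen && *a == 0) { a++; alen--; }
    while (blen && *b == 0) { b++; blen--; }

    if (alen != blen)
        return alen < blen ? -1 : 1;
    return memcmp(a, b, alen);
}

/* secret = smaller || larger; only the concatenation order is shown. */
static size_t build_secret(unsigned char *out,
                           const unsigned char *local, size_t llen,
                           const unsigned char *peer, size_t plen)
{
    const unsigned char *first = local, *second = peer;
    size_t flen = llen, slen = plen;

    if (compare_vectors(local, llen, peer, plen) > 0) {
        first = peer;   flen = plen;
        second = local; slen = llen;
    }
    memcpy(out, first, flen);
    memcpy(out + flen, second, slen);
    return flen + slen;
}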
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 826661be73e..5436c692116 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1034,7 +1034,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
* association.
*
* This means that any chunks that can help us identify the association need
-* to be looked at to find this assocation.
+* to be looked at to find this association.
*/
static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
const union sctp_addr *laddr,
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 60600d337a3..b4f3cf06d8d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -510,7 +510,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
sh->checksum = sctp_end_cksum(crc32);
} else {
if (dst->dev->features & NETIF_F_SCTP_CSUM) {
- /* no need to seed psuedo checksum for SCTP */
+ /* no need to seed pseudo checksum for SCTP */
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (skb_transport_header(nskb) -
nskb->head);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 26dc005113a..bf92a5b68f8 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -177,13 +177,13 @@ static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
* 3) If the missing report count for TSN t is to be
* incremented according to [RFC2960] and
* [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
- * then the sender MUST futher execute steps 3.1 and
+ * then the sender MUST further execute steps 3.1 and
* 3.2 to determine if the missing report count for
* TSN t SHOULD NOT be incremented.
*
* 3.3) If 3.1 and 3.2 do not dictate that the missing
* report count for t should not be incremented, then
- * the sender SOULD increment missing report count for
+ * the sender SHOULD increment missing report count for
* t (according to [RFC2960] and [SCTP_STEWART_2002]).
*/
static inline int sctp_cacc_skip(struct sctp_transport *primary,
@@ -843,7 +843,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
case SCTP_CID_ECN_CWR:
case SCTP_CID_ASCONF_ACK:
one_packet = 1;
- /* Fall throught */
+ /* Fall through */
case SCTP_CID_SACK:
case SCTP_CID_HEARTBEAT:
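The CACC rule quoted in the first hunk of this file (steps 3 and 3.1-3.3 above) reduces to a small predicate: with CHANGEOVER_ACTIVE set, the missing-report count for TSN t is incremented only if neither step 3.1 nor step 3.2 forbids it. The self-contained sketch below restates just that logic; the two step predicates are taken as opaque booleans here, their details belonging to the sctp_cacc_skip_*() helpers in this file.

#include <stdbool.h>

static bool increment_missing_report(bool changeover_active,
                                     bool skip_3_1, bool skip_3_2)
{
    if (!changeover_active)
        return true;             /* plain RFC 2960 behaviour */
    return !(skip_3_1 || skip_3_2); /* 3.3: increment unless 3.1 or 3.2 says skip */
}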
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index de98665db52..b3434cc7d0c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3106,10 +3106,10 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
- * ASCONF_ACK parameters are less than or equal to the twice of ASCONF
+ * ASCONF_ACK parameters are at most four times the size of the ASCONF
* parameters.
*/
- asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 2);
+ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
if (!asconf_ack)
goto done;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b21b218d564..5f86ee4b54c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -482,7 +482,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
* If the timer was a heartbeat, we only increment error counts
* when we already have an outstanding HEARTBEAT that has not
* been acknowledged.
- * Additionaly, some tranport states inhibit error increments.
+ * Additionally, some transport states inhibit error increments.
*/
if (!is_hb) {
asoc->overall_error_count++;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4b4eb7c96bb..76792083c37 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -551,7 +551,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
- * can't destroy this association just becuase the packet
+ * can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
@@ -1546,7 +1546,7 @@ cleanup:
}
/*
- * Handle simultanous INIT.
+ * Handle simultaneous INIT.
* This means we started an INIT and then we got an INIT request from
* our peer.
*
@@ -2079,7 +2079,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
- * Becasue the length is "invalid", we can't really discard just
+ * Because the length is "invalid", we can't really discard just
* as we do not know its true length. So, to be safe, discard the
* packet.
*/
@@ -2120,7 +2120,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
- * Becasue the length is "invalid", we can't really discard just
+ * Because the length is "invalid", we can't really discard just
* as we do not know its true length. So, to be safe, discard the
* packet.
*/
@@ -2381,7 +2381,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
- * Becasue the length is "invalid", we can't really discard just
+ * Because the length is "invalid", we can't really discard just
* as we do not know its true length. So, to be safe, discard the
* packet.
*/
@@ -2448,7 +2448,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
- * Becasue the length is "invalid", we can't really discard just
+ * Because the length is "invalid", we can't really discard just
* as we do not know its true length. So, to be safe, discard the
* packet.
*/
@@ -3855,7 +3855,7 @@ gen_shutdown:
}
/*
- * SCTP-AUTH Section 6.3 Receving authenticated chukns
+ * SCTP-AUTH Section 6.3 Receiving authenticated chunks
*
* The receiver MUST use the HMAC algorithm indicated in the HMAC
* Identifier field. If this algorithm was not specified by the
@@ -4231,7 +4231,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
- * can't destroy this association just becuase the packet
+ * can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
@@ -4402,9 +4402,9 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
}
/* Handle protocol violation of an invalid chunk bundling. For example,
- * when we have an association and we recieve bundled INIT-ACK, or
+ * when we have an association and we receive bundled INIT-ACK, or
 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
- * statement from the specs. Additinally, there might be an attacker
+ * statement from the specs. Additionally, there might be an attacker
* on the path and we may not want to continue this communication.
*/
static sctp_disposition_t sctp_sf_violation_chunk(
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3951a10605b..deb82e35a10 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1193,7 +1193,7 @@ out_free:
* an endpoint that is multi-homed. Much like sctp_bindx() this call
* allows a caller to specify multiple addresses at which a peer can be
* reached. The way the SCTP stack uses the list of addresses to set up
- * the association is implementation dependant. This function only
+ * the association is implementation dependent. This function only
* specifies that the stack will try to make use of all the addresses in
* the list when needed.
*
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index aa72e89c3ee..dff27d5e22f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));
/* Per TSVWG discussion with Randy. Allow the application to
- * ressemble a fragmented message.
+ * reassemble a fragmented message.
*/
ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 17678189d05..f2d1de7f2ff 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -240,7 +240,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
} else {
/*
* If fragment interleave is enabled, we
- * can queue this to the recieve queue instead
+ * can queue this to the receive queue instead
* of the lobby.
*/
if (sctp_sk(sk)->frag_interleave)
diff --git a/net/socket.c b/net/socket.c
index 5212447c86e..310d16b1b3c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2986,7 +2986,7 @@ out:
/* Since old style bridge ioctls end up using SIOCDEVPRIVATE
* for some operations; this forces use of the newer bridge-utils that
- * use compatiable ioctls
+ * use compatible ioctls
*/
static int old_bridge_ioctl(compat_ulong_t __user *argp)
{
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 9022f0a6503..0a9a2ec2e46 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
- static const char sigkeyconstant[] = "signaturekey";
+ char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bcdae78fdfc..8d0f7d3c71c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1101,7 +1101,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
/* credential is:
* version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
- * at least 5 u32s, and is preceeded by length, so that makes 6.
+ * at least 5 u32s, and is preceded by length, so that makes 6.
*/
if (argv->iov_len < 5 * 4)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1e336a06d3e..bf005d3c65e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -504,7 +504,7 @@ static int xs_nospace(struct rpc_task *task)
* EAGAIN: The socket was blocked, please call again later to
* complete the request
* ENOTCONN: Caller needs to invoke connect logic then call again
- * other: Some other error occured, the request was not sent
+ * other: Some other error occurred, the request was not sent
*/
static int xs_udp_send_request(struct rpc_task *task)
{
@@ -590,7 +590,7 @@ static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
* EAGAIN: The socket was blocked, please call again later to
* complete the request
* ENOTCONN: Caller needs to invoke connect logic then call again
- * other: Some other error occured, the request was not sent
+ * other: Some other error occurred, the request was not sent
*
* XXX: In the case of soft timeouts, should we eventually give up
* if sendmsg is not able to make progress?
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 43639ff1cbe..ebf338f7b14 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2471,7 +2471,7 @@ exit:
* A pending message being re-assembled must store certain values
* to handle subsequent fragments correctly. The following functions
* help storing these values in unused, available fields in the
- * pending message. This makes dynamic memory allocation unecessary.
+ * pending message. This makes dynamic memory allocation unnecessary.
*/
static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c9fa6dfcf28..80025a1b3bf 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -160,7 +160,7 @@ void tipc_named_withdraw(struct publication *publ)
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
- warn("Withdrawl distribution failure\n");
+ warn("Withdrawal distribution failure\n");
return;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1663e1a2efd..b1d75beb7e2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -207,7 +207,7 @@ static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
/*
* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
- * sun_path[108] doesnt as such exist. However in kernel space
+ * sun_path[108] doesn't as such exist. However in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer.
*/
@@ -524,6 +524,8 @@ static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t);
+static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
+ struct msghdr *, size_t, int);
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
@@ -583,7 +585,7 @@ static const struct proto_ops unix_seqpacket_ops = {
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_seqpacket_sendmsg,
- .recvmsg = unix_dgram_recvmsg,
+ .recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
@@ -1699,6 +1701,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
+static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
+}
+
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 11f25c7a7a0..f346395314b 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -51,7 +51,7 @@
/*
* Structures for interfacing with the /proc filesystem.
- * Router creates its own directory /proc/net/router with the folowing
+ * Router creates its own directory /proc/net/router with the following
* entries:
* config device configuration
* status global device statistics
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 3332d5bce31..ab801a1097b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -809,7 +809,7 @@ static void handle_channel(struct wiphy *wiphy,
if (r) {
/*
* We will disable all channels that do not match our
- * recieved regulatory rule unless the hint is coming
+ * received regulatory rule unless the hint is coming
* from a Country IE and the Country IE had no information
* about a band. The IEEE 802.11 spec allows for an AP
* to send only a subset of the regulatory rules allowed,
@@ -838,7 +838,7 @@ static void handle_channel(struct wiphy *wiphy,
request_wiphy && request_wiphy == wiphy &&
request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
/*
- * This gaurantees the driver's requested regulatory domain
+ * This guarantees the driver's requested regulatory domain
* will always be used as a base for further regulatory
* settings
*/
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 406207515b5..f77e4e75f91 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -31,7 +31,7 @@
* x25_parse_facilities - Parse facilities from skb into the facilities structs
*
* @skb: sk_buff to parse
- * @facilities: Regular facilites, updated as facilities are found
+ * @facilities: Regular facilities, updated as facilities are found
* @dte_facs: ITU DTE facilities, updated as DTE facilities are found
* @vc_fac_mask: mask is updated with all facilities found
*
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index 25a81079396..c541b622ae1 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -31,7 +31,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
goto out_no_route;
if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
- /* This shouldnt happen, if it occurs somehow
+ /* This shouldn't happen, if it occurs somehow
* do something sensible
*/
goto out_put_route;
@@ -45,7 +45,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
}
/* Remote end sending a call request on an already
- * established LCI? It shouldnt happen, just in case..
+ * established LCI? It shouldn't happen, just in case..
*/
read_lock_bh(&x25_forward_list_lock);
list_for_each(entry, &x25_forward_list) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 15792d8b627..b4d745ea8ee 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct net *net = xp_net(policy);
unsigned long now = jiffies;
struct net_device *dev;
+ struct xfrm_mode *inner_mode;
struct dst_entry *dst_prev = NULL;
struct dst_entry *dst0 = NULL;
int i = 0;
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
+ if (xfrm[i]->sel.family == AF_UNSPEC) {
+ inner_mode = xfrm_ip2inner_mode(xfrm[i],
+ xfrm_af2proto(family));
+ if (!inner_mode) {
+ err = -EAFNOSUPPORT;
+ dst_release(dst);
+ goto put_states;
+ }
+ } else
+ inner_mode = xfrm[i]->inner_mode;
+
if (!dst_prev)
dst0 = dst1;
else {
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->lastuse = now;
dst1->input = dst_discard;
- dst1->output = xfrm[i]->outer_mode->afinfo->output;
+ dst1->output = inner_mode->afinfo->output;
dst1->next = dst_prev;
dst_prev = dst1;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index f218385950c..47f1b8638df 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -532,9 +532,12 @@ int xfrm_init_replay(struct xfrm_state *x)
if (replay_esn) {
if (replay_esn->replay_window >
- replay_esn->bmp_len * sizeof(__u32))
+ replay_esn->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
+ if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
+ return -EINVAL;
+
if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
x->repl = &xfrm_replay_esn;
else
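The first hunk above turns the sanity bound from bytes into bits: bmp_len counts 32-bit words in the replay bitmap, so each word tracks 32 sequence numbers and the largest representable window is bmp_len * 32, not bmp_len * 4. The sketch below is a self-contained restatement of the corrected checks, with the structure reduced to the two fields that matter (this is not the kernel's definition of the ESN replay state).

#include <stdint.h>
#include <stdbool.h>

struct replay_esn_sketch {
    uint32_t bmp_len;        /* bitmap length in 32-bit words */
    uint32_t replay_window;  /* requested window size in bits */
};

static bool replay_window_valid(const struct replay_esn_sketch *re, bool esn_enabled)
{
    /* bmp_len words of 32 bits each can cover at most bmp_len * 32
     * sequence numbers; a larger window cannot be represented. */
    if (re->replay_window > re->bmp_len * sizeof(uint32_t) * 8)
        return false;
    /* Second hunk: ESN with no anti-replay window at all is rejected. */
    if (esn_enabled && re->replay_window == 0)
        return false;
    return true;
}

For example, bmp_len = 4 allows a window of up to 128 bits, where the old byte-based bound would have rejected anything above 16.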
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3d15d3e1b2c..c658cb3bc7c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -124,6 +124,9 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+ if ((p->flags & XFRM_STATE_ESN) && !rt)
+ return -EINVAL;
+
if (!rt)
return 0;
@@ -894,7 +897,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
- if (nlh == NULL) /* shouldnt really happen ... */
+ if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
@@ -954,7 +957,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
- if (nlh == NULL) /* shouldnt really happen ... */
+ if (nlh == NULL) /* shouldn't really happen ... */
return -EMSGSIZE;
f = nlmsg_data(nlh);
@@ -1361,7 +1364,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!xp)
return err;
- /* shouldnt excl be based on nlh flags??
+ /* shouldn't excl be based on nlh flags??
* Aha! this is anti-netlink really i.e more pfkey derived
 * in netlink excl is a flag and you wouldn't need
* a type XFRM_MSG_UPDPOLICY - JHS */