Diffstat (limited to 'net/sctp/socket.c')
 net/sctp/socket.c | 1433 ++++++++++++-----
 1 file changed, 987 insertions(+), 446 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 842c7f3650b..42989968940 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -28,16 +28,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -52,9 +48,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -70,32 +63,32 @@
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/compat.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
+#include <net/busy_poll.h>
#include <linux/socket.h> /* for sa_family_t */
+#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-/* WARNING: Please do not remove the SCTP_STATIC attribute to
- * any of the functions below as they are used to export functions
- * used by a project regression testsuite.
- */
-
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
-static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
+static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
@@ -108,7 +101,6 @@ static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
-static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern struct kmem_cache *sctp_bucket_cachep;
extern long sysctl_sctp_mem[3];
@@ -278,14 +270,14 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
* sockaddr_in6 [RFC 2553]),
* addr_len - the size of the address structure.
*/
-SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
+static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
int retval = 0;
- sctp_lock_sock(sk);
+ lock_sock(sk);
- SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, addr: %p, addr_len: %d)\n",
- sk, addr, addr_len);
+ pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
/* Disallow binding twice. */
if (!sctp_sk(sk)->ep->base.bind_addr.port)
@@ -294,7 +286,7 @@ SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
else
retval = -EINVAL;
- sctp_release_sock(sk);
+ release_sock(sk);
return retval;
}
@@ -332,8 +324,9 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
}
/* Bind a local address either to an endpoint or to an association. */
-SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
@@ -344,19 +337,15 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
/* Common sockaddr verification. */
af = sctp_sockaddr_af(sp, addr, len);
if (!af) {
- SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d) EINVAL\n",
- sk, addr, len);
+ pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
+ __func__, sk, addr, len);
return -EINVAL;
}
snum = ntohs(addr->v4.sin_port);
- SCTP_DEBUG_PRINTK_IPADDR("sctp_do_bind(sk: %p, new addr: ",
- ", port: %d, new port: %d, len: %d)\n",
- sk,
- addr,
- bp->port, snum,
- len);
+ pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
+ __func__, sk, &addr->sa, bp->port, snum, len);
/* PF specific bind() address verification. */
if (!sp->pf->bind_verify(sp, addr))
@@ -370,14 +359,14 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
if (!snum)
snum = bp->port;
else if (snum != bp->port) {
- SCTP_DEBUG_PRINTK("sctp_do_bind:"
- " New port %d does not match existing port "
- "%d.\n", snum, bp->port);
+ pr_debug("%s: new port %d doesn't match existing port "
+ "%d\n", __func__, snum, bp->port);
return -EINVAL;
}
}
- if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+ if (snum && snum < PROT_SOCK &&
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
/* See if the address matches any of the addresses we may have
@@ -426,6 +415,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
+ struct net *net = sock_net(asoc->base.sk);
int retval = 0;
/* If there is an outstanding ASCONF chunk, queue it for later
@@ -438,7 +428,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(chunk);
- retval = sctp_primitive_ASCONF(asoc, chunk);
+ retval = sctp_primitive_ASCONF(net, asoc, chunk);
if (retval)
sctp_chunk_free(chunk);
else
@@ -468,15 +458,15 @@ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
struct sockaddr *sa_addr;
struct sctp_af *af;
- SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n",
- sk, addrs, addrcnt);
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
+ addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
/* The list may contain either IPv4 or IPv6 address;
* determine the address length for walking thru the list.
*/
- sa_addr = (struct sockaddr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
if (!af) {
retval = -EINVAL;
@@ -514,6 +504,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
@@ -528,17 +519,16 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
int i;
int retval = 0;
- if (!sctp_addip_enable)
+ if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
- SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
- __func__, sk, addrs, addrcnt);
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
-
if (!asoc->peer.asconf_capable)
continue;
@@ -555,7 +545,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af) {
retval = -EINVAL;
@@ -583,22 +573,36 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
goto out;
}
- retval = sctp_send_asconf(asoc, chunk);
- if (retval)
- goto out;
-
/* Add the new addresses to the bind address list with
* use_as_src set to 0.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
memcpy(&saveaddr, addr, af->sockaddr_len);
retval = sctp_add_bind_addr(bp, &saveaddr,
SCTP_ADDR_NEW, GFP_ATOMIC);
addr_buf += af->sockaddr_len;
}
+ if (asoc->src_out_of_asoc_ok) {
+ struct sctp_transport *trans;
+
+ list_for_each_entry(trans,
+ &asoc->peer.transport_addr_list, transports) {
+ /* Clear the source and route cache */
+ dst_release(trans->dst);
+ trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
+ 2*asoc->pathmtu, 4380));
+ trans->ssthresh = asoc->peer.i.a_rwnd;
+ trans->rto = asoc->rto_initial;
+ sctp_max_rto(asoc, trans);
+ trans->rtt = trans->srtt = trans->rttvar = 0;
+ sctp_transport_route(trans, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+ }
+ retval = sctp_send_asconf(asoc, chunk);
}
out:
@@ -631,8 +635,8 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
union sctp_addr *sa_addr;
struct sctp_af *af;
- SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n",
- sk, addrs, addrcnt);
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
addr_buf = addrs;
for (cnt = 0; cnt < addrcnt; cnt++) {
@@ -646,7 +650,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
goto err_bindx_rem;
}
- sa_addr = (union sctp_addr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
if (!af) {
retval = -EINVAL;
@@ -658,11 +662,15 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
goto err_bindx_rem;
}
- if (sa_addr->v4.sin_port != htons(bp->port)) {
+ if (sa_addr->v4.sin_port &&
+ sa_addr->v4.sin_port != htons(bp->port)) {
retval = -EINVAL;
goto err_bindx_rem;
}
+ if (!sa_addr->v4.sin_port)
+ sa_addr->v4.sin_port = htons(bp->port);
+
/* FIXME - There is probably a need to check if sk->sk_saddr and
* sk->sk_rcv_addr are currently set to one of the addresses to
* be removed. This is something which needs to be looked into
@@ -699,6 +707,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
@@ -711,15 +720,17 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
+ int stored = 0;
- if (!sctp_addip_enable)
+ chunk = NULL;
+ if (!net->sctp.addip_enable)
return retval;
sp = sctp_sk(sk);
ep = sp->ep;
- SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
- __func__, sk, addrs, addrcnt);
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
list_for_each_entry(asoc, &ep->asocs, asocs) {
@@ -739,7 +750,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- laddr = (union sctp_addr *)addr_buf;
+ laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
@@ -762,8 +773,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
- if (!laddr)
- continue;
+ if ((laddr == NULL) && (addrcnt == 1)) {
+ if (asoc->asconf_addr_del_pending)
+ continue;
+ asoc->asconf_addr_del_pending =
+ kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
+ if (asoc->asconf_addr_del_pending == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ asoc->asconf_addr_del_pending->sa.sa_family =
+ addrs->sa_family;
+ asoc->asconf_addr_del_pending->v4.sin_port =
+ htons(bp->port);
+ if (addrs->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addrs;
+ asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
+ } else if (addrs->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addrs;
+ asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
+ }
+
+ pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
+ __func__, asoc, &asoc->asconf_addr_del_pending->sa,
+ asoc->asconf_addr_del_pending);
+
+ asoc->src_out_of_asoc_ok = 1;
+ stored = 1;
+ goto skip_mkasconf;
+ }
+
+ if (laddr == NULL)
+ return -EINVAL;
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
@@ -776,12 +821,13 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
goto out;
}
+skip_mkasconf:
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- laddr = (union sctp_addr *)addr_buf;
+ laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
@@ -801,12 +847,37 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
sctp_sk(asoc->base.sk));
}
+ if (stored)
+ /* We don't need to transmit ASCONF */
+ continue;
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
+/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+ struct sock *sk = sctp_opt2sk(sp);
+ union sctp_addr *addr;
+ struct sctp_af *af;
+
+ /* It is safe to write port space in caller. */
+ addr = &addrw->a;
+ addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (!af)
+ return -EINVAL;
+ if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+ return -EINVAL;
+
+ if (addrw->state == SCTP_ADDR_NEW)
+ return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+ else
+ return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
@@ -883,9 +954,9 @@ out:
*
* Returns 0 if ok, <0 errno code on error.
*/
-SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
- struct sockaddr __user *addrs,
- int addrs_size, int op)
+static int sctp_setsockopt_bindx(struct sock *sk,
+ struct sockaddr __user *addrs,
+ int addrs_size, int op)
{
struct sockaddr *kaddrs;
int err;
@@ -895,8 +966,8 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
void *addr_buf;
struct sctp_af *af;
- SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p"
- " addrs_size %d opt %d\n", sk, addrs, addrs_size, op);
+ pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
+ __func__, sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
return -EINVAL;
@@ -923,7 +994,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
return -EINVAL;
}
- sa_addr = (struct sockaddr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
@@ -970,11 +1041,12 @@ out:
* Common routine for handling connect() and sctp_connectx().
* Connect will come in with just a single address.
*/
-static int __sctp_connect(struct sock* sk,
+static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc = NULL;
@@ -1014,7 +1086,7 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
- sa_addr = (union sctp_addr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
@@ -1037,9 +1109,10 @@ static int __sctp_connect(struct sock* sk,
/* Make sure the destination port is correctly set
* in all addresses.
*/
- if (asoc && asoc->peer.port && asoc->peer.port != port)
+ if (asoc && asoc->peer.port && asoc->peer.port != port) {
+ err = -EINVAL;
goto out_free;
-
+ }
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
@@ -1082,7 +1155,7 @@ static int __sctp_connect(struct sock* sk,
* be permitted to open new associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
- !capable(CAP_NET_BIND_SERVICE)) {
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_free;
}
@@ -1125,7 +1198,7 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
- err = sctp_primitive_ASSOCIATE(asoc, NULL);
+ err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0) {
goto out_free;
}
@@ -1152,12 +1225,17 @@ static int __sctp_connect(struct sock* sk,
asoc = NULL;
out_free:
+ pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
+ __func__, asoc, kaddrs, err);
- SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
- " kaddrs: %p err: %d\n",
- asoc, kaddrs, err);
- if (asoc)
+ if (asoc) {
+ /* sctp_primitive_ASSOCIATE may have added this association
+ * To the hash table, try to unhash it, just in case, its a noop
+ * if it wasn't hashed so we're safe
+ */
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
return err;
}
@@ -1193,7 +1271,7 @@ out_free:
* an endpoint that is multi-homed. Much like sctp_bindx() this call
* allows a caller to specify multiple addresses at which a peer can be
* reached. The way the SCTP stack uses the list of addresses to set up
- * the association is implementation dependant. This function only
+ * the association is implementation dependent. This function only
* specifies that the stack will try to make use of all the addresses in
* the list when needed.
*
@@ -1223,7 +1301,7 @@ out_free:
*
* Returns >=0 if ok, <0 errno code on error.
*/
-SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
+static int __sctp_setsockopt_connectx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size,
sctp_assoc_t *assoc_id)
@@ -1231,8 +1309,8 @@ SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
int err = 0;
struct sockaddr *kaddrs;
- SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
- __func__, sk, addrs, addrs_size);
+ pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
+ __func__, sk, addrs, addrs_size);
if (unlikely(addrs_size <= 0))
return -EINVAL;
@@ -1261,9 +1339,9 @@ SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
* This is an older interface. It's kept for backward compatibility
* to the option that doesn't provide association id.
*/
-SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
- struct sockaddr __user *addrs,
- int addrs_size)
+static int sctp_setsockopt_connectx_old(struct sock *sk,
+ struct sockaddr __user *addrs,
+ int addrs_size)
{
return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
@@ -1274,9 +1352,9 @@ SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
* indication to the call. Error is always negative and association id is
* always positive.
*/
-SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
- struct sockaddr __user *addrs,
- int addrs_size)
+static int sctp_setsockopt_connectx(struct sock *sk,
+ struct sockaddr __user *addrs,
+ int addrs_size)
{
sctp_assoc_t assoc_id = 0;
int err = 0;
@@ -1292,29 +1370,51 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
/*
* New (hopefully final) interface for the API.
* We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
- * addrs_num structure member. That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
* code.
*/
-SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
- char __user *optval,
- int __user *optlen)
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
+static int sctp_getsockopt_connectx3(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
{
struct sctp_getaddrs_old param;
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(param))
- return -EINVAL;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct compat_sctp_getaddrs_old param32;
- if (copy_from_user(&param, optval, sizeof(param)))
- return -EFAULT;
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;
- err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)param.addrs,
- param.addr_num, &assoc_id);
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }
+ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+ param.addrs, param.addr_num,
+ &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
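
For context, userspace normally reaches this connectx3 path through the sctp_connectx() wrapper in lksctp-tools, which packs the peer addresses back to back exactly as __sctp_connect() walks them. A minimal sketch, assuming a recent libsctp that exports the four-argument sctp_connectx(); the helper name, port and addresses are illustrative:

#include <string.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>

/* Sketch only: connect to a multi-homed peer with sctp_connectx()
 * (link with -lsctp); the kernel side is the connectx3 getsockopt
 * handled above.
 */
static int connect_two_paths(int sd, const char *ip1, const char *ip2)
{
	struct sockaddr_in addrs[2];
	sctp_assoc_t id;

	memset(addrs, 0, sizeof(addrs));
	addrs[0].sin_family = AF_INET;
	addrs[0].sin_port = htons(5000);	/* same port on every address */
	inet_pton(AF_INET, ip1, &addrs[0].sin_addr);
	addrs[1] = addrs[0];
	inet_pton(AF_INET, ip2, &addrs[1].sin_addr);

	/* Addresses are laid out back to back, as __sctp_connect()
	 * expects to walk them; the new association id comes back in id.
	 */
	return sctp_connectx(sd, (struct sockaddr *)addrs, 2, &id);
}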
@@ -1375,20 +1475,26 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
* shutdown phase does not finish during this period, close() will
* return but the graceful shutdown phase continues in the system.
*/
-SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
+static void sctp_close(struct sock *sk, long timeout)
{
+ struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct list_head *pos, *temp;
+ unsigned int data_was_unread;
- SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
+ pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- sctp_lock_sock(sk);
+ lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
ep = sctp_sk(sk)->ep;
+ /* Clean up any skbs sitting on the receive queue. */
+ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+ data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
/* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1406,32 +1512,30 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
}
}
- if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+ if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+ !skb_queue_empty(&asoc->ulpq.reasm) ||
+ (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, NULL, 0);
if (chunk)
- sctp_primitive_ABORT(asoc, chunk);
+ sctp_primitive_ABORT(net, asoc, chunk);
} else
- sctp_primitive_SHUTDOWN(asoc, NULL);
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
- /* Clean up any skbs sitting on the receive queue. */
- sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
- sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
sctp_wait_for_close(sk, timeout);
/* This will run the backlog queue. */
- sctp_release_sock(sk);
+ release_sock(sk);
/* Supposedly, no process has access to the socket, but
* the net layers still may.
*/
- sctp_local_bh_disable();
- sctp_bh_lock_sock(sk);
+ local_bh_disable();
+ bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -1439,8 +1543,8 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
sock_hold(sk);
sk_common_release(sk);
- sctp_bh_unlock_sock(sk);
- sctp_local_bh_enable();
+ bh_unlock_sock(sk);
+ local_bh_enable();
sock_put(sk);
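
With the reordering above, close() now aborts the association when unread data is left on the receive or reassembly queues, or when SO_LINGER is set with a zero timeout. A small sketch of the userspace side of the latter case, using only standard socket calls; the helper name is illustrative:

#include <sys/socket.h>
#include <unistd.h>

/* Sketch only: force the ABORT branch of sctp_close() by requesting an
 * immediate (zero-timeout) linger before closing the socket.
 */
static void abort_on_close(int sd)
{
	struct linger lin = {
		.l_onoff  = 1,	/* SOCK_LINGER gets set on the sock */
		.l_linger = 0,	/* !sk_lingertime -> sctp_primitive_ABORT */
	};

	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(sd);	/* the peer sees an ABORT rather than a graceful SHUTDOWN */
}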
@@ -1480,19 +1584,20 @@ static int sctp_error(struct sock *sk, int flags, int err)
*/
/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
-SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
+static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
-SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t msg_len)
+static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t msg_len)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
- struct sctp_association *new_asoc=NULL, *asoc=NULL;
+ struct sctp_association *new_asoc = NULL, *asoc = NULL;
struct sctp_transport *transport, *chunk_tp;
struct sctp_chunk *chunk;
union sctp_addr to;
struct sockaddr *msg_name = NULL;
- struct sctp_sndrcvinfo default_sinfo = { 0 };
+ struct sctp_sndrcvinfo default_sinfo;
struct sctp_sndrcvinfo *sinfo;
struct sctp_initmsg *sinit;
sctp_assoc_t associd = 0;
@@ -1504,14 +1609,12 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
- SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n",
- sk, msg, msg_len);
-
err = 0;
sp = sctp_sk(sk);
ep = sp->ep;
- SCTP_DEBUG_PRINTK("Using endpoint: %p.\n", ep);
+ pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
+ msg, msg_len, ep);
/* We cannot send a message over a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
@@ -1521,9 +1624,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* Parse out the SCTP CMSGs. */
err = sctp_msghdr_parse(msg, &cmsgs);
-
if (err) {
- SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err);
+ pr_debug("%s: msghdr parse err:%x\n", __func__, err);
goto out_nounlock;
}
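
sctp_msghdr_parse() above extracts the per-message parameters from SCTP_SNDRCV ancillary data. A minimal sender sketch, assuming <netinet/sctp.h> provides struct sctp_sndrcvinfo and SCTP_SNDRCV; the helper name, stream number and PPID are illustrative:

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Sketch only: send one message on stream 1 with a PPID, using the
 * SCTP_SNDRCV cmsg that sctp_msghdr_parse() consumes above.
 */
static ssize_t send_on_stream(int sd, const void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct sctp_sndrcvinfo sinfo;

	memset(cbuf, 0, sizeof(cbuf));
	memset(&sinfo, 0, sizeof(sinfo));
	sinfo.sinfo_stream = 1;
	sinfo.sinfo_ppid = htonl(42);	/* opaque to the stack */

	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(sinfo));
	memcpy(CMSG_DATA(cmsg), &sinfo, sizeof(sinfo));

	return sendmsg(sd, &msg, 0);
}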
@@ -1555,8 +1657,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
associd = sinfo->sinfo_assoc_id;
}
- SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n",
- msg_len, sinfo_flags);
+ pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
+ msg_len, sinfo_flags);
/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
@@ -1585,9 +1687,9 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
transport = NULL;
- SCTP_DEBUG_PRINTK("About to look up association.\n");
+ pr_debug("%s: about to look up association\n", __func__);
- sctp_lock_sock(sk);
+ lock_sock(sk);
/* If a msg_name has been specified, assume this is to be used. */
if (msg_name) {
@@ -1615,7 +1717,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (asoc) {
- SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc);
+ pr_debug("%s: just looked up association:%p\n", __func__, asoc);
/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
* socket that has an association in CLOSED state. This can
@@ -1628,9 +1730,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (sinfo_flags & SCTP_EOF) {
- SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
- asoc);
- sctp_primitive_SHUTDOWN(asoc, NULL);
+ pr_debug("%s: shutting down association:%p\n",
+ __func__, asoc);
+
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
err = 0;
goto out_unlock;
}
@@ -1642,8 +1745,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
- SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
- sctp_primitive_ABORT(asoc, chunk);
+ pr_debug("%s: aborting association:%p\n",
+ __func__, asoc);
+
+ sctp_primitive_ABORT(net, asoc, chunk);
err = 0;
goto out_unlock;
}
@@ -1651,7 +1756,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* Do we need to create the association? */
if (!asoc) {
- SCTP_DEBUG_PRINTK("There is no association yet.\n");
+ pr_debug("%s: there is no association yet\n", __func__);
if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
err = -EINVAL;
@@ -1662,7 +1767,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* either the default or the user specified stream counts.
*/
if (sinfo) {
- if (!sinit || (sinit && !sinit->sinit_num_ostreams)) {
+ if (!sinit || !sinit->sinit_num_ostreams) {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
@@ -1700,7 +1805,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
- !capable(CAP_NET_BIND_SERVICE)) {
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_unlock;
}
@@ -1750,12 +1855,13 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
/* ASSERT: we have a valid association at this point. */
- SCTP_DEBUG_PRINTK("We have a valid association.\n");
+ pr_debug("%s: we have a valid association\n", __func__);
if (!sinfo) {
/* If the user didn't specify SNDRCVINFO, make up one with
* some defaults.
*/
+ memset(&default_sinfo, 0, sizeof(default_sinfo));
default_sinfo.sinfo_stream = asoc->default_stream;
default_sinfo.sinfo_flags = asoc->default_flags;
default_sinfo.sinfo_ppid = asoc->default_ppid;
@@ -1774,7 +1880,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (asoc->pmtu_pending)
- sctp_assoc_pending_pmtu(asoc);
+ sctp_assoc_pending_pmtu(sk, asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
@@ -1786,12 +1892,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_free;
}
- if (sinfo) {
- /* Check for invalid stream. */
- if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
- err = -EINVAL;
- goto out_free;
- }
+ /* Check for invalid stream. */
+ if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+ err = -EINVAL;
+ goto out_free;
}
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -1817,16 +1921,17 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* Auto-connect, if we aren't connected already. */
if (sctp_state(asoc, CLOSED)) {
- err = sctp_primitive_ASSOCIATE(asoc, NULL);
+ err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0)
goto out_free;
- SCTP_DEBUG_PRINTK("We associated primitively.\n");
+
+ pr_debug("%s: we associated primitively\n", __func__);
}
/* Break the message into multiple chunks of maximum size. */
datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
- if (!datamsg) {
- err = -ENOMEM;
+ if (IS_ERR(datamsg)) {
+ err = PTR_ERR(datamsg);
goto out_free;
}
@@ -1845,19 +1950,17 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* works that way today. Keep it that way or this
* breaks.
*/
- err = sctp_primitive_SEND(asoc, datamsg);
+ err = sctp_primitive_SEND(net, asoc, datamsg);
/* Did the lower layer accept the chunk? */
- if (err)
+ if (err) {
sctp_datamsg_free(datamsg);
- else
- sctp_datamsg_put(datamsg);
+ goto out_free;
+ }
- SCTP_DEBUG_PRINTK("We sent primitively.\n");
+ pr_debug("%s: we sent primitively\n", __func__);
- if (err)
- goto out_free;
- else
- err = msg_len;
+ sctp_datamsg_put(datamsg);
+ err = msg_len;
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
@@ -1865,10 +1968,12 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
out_free:
- if (new_asoc)
+ if (new_asoc) {
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
out_unlock:
- sctp_release_sock(sk);
+ release_sock(sk);
out_nounlock:
return sctp_error(sk, msg_flags, err);
@@ -1939,9 +2044,9 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
*/
static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len, int noblock,
- int flags, int *addr_len)
+static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len)
{
struct sctp_ulpevent *event = NULL;
struct sctp_sock *sp = sctp_sk(sk);
@@ -1950,12 +2055,11 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
int err = 0;
int skb_len;
- SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: "
- "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
- "len", len, "knoblauch", noblock,
- "flags", flags, "addr_len", addr_len);
+ pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
+ "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
+ addr_len);
- sctp_lock_sock(sk);
+ lock_sock(sk);
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
err = -ENOTCONN;
@@ -2039,7 +2143,7 @@ out_free:
sctp_ulpevent_free(event);
}
out:
- sctp_release_sock(sk);
+ release_sock(sk);
return err;
}
@@ -2070,10 +2174,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_association *asoc;
+ struct sctp_ulpevent *event;
+
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
+
+ /*
+ * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+ * if there is no data to be sent or retransmit, the stack will
+ * immediately send up this notification.
+ */
+ if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+ &sctp_sk(sk)->subscribe)) {
+ asoc = sctp_id2assoc(sk, 0);
+
+ if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ sctp_ulpq_tail_event(&asoc->ulpq, event);
+ }
+ }
+
return 0;
}
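
The hunk above makes a subscription to SCTP_SENDER_DRY_EVENT deliver the notification immediately when the outqueue is already empty. A sketch of that subscription from userspace, assuming the usual struct sctp_event_subscribe layout from <netinet/sctp.h>; the helper name is illustrative:

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: subscribe to SCTP_SENDER_DRY_EVENT; with the change
 * above, an already-empty outqueue delivers the notification right away.
 */
static int subscribe_sender_dry(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_sender_dry_event = 1;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
			  &events, sizeof(events));
}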
@@ -2092,6 +2219,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
@@ -2100,8 +2228,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
+
+ if (sp->autoclose > net->sctp.max_autoclose)
+ sp->autoclose = net->sctp.max_autoclose;
return 0;
}
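
With the change above the autoclose value is clamped to the per-namespace sctp.max_autoclose sysctl rather than MAX_SCHEDULE_TIMEOUT. A usage sketch for a one-to-many socket; the helper name and the 60-second value are illustrative:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: ask a one-to-many (SOCK_SEQPACKET) socket to close idle
 * associations after 60 seconds; the kernel may clamp this to the
 * sysctl'd maximum as shown above.
 */
static int set_autoclose(int sd)
{
	int seconds = 60;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
			  &seconds, sizeof(seconds));
}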
@@ -2214,7 +2343,9 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
int error;
if (params->spp_flags & SPP_HB_DEMAND && trans) {
- error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
+ struct net *net = sock_net(trans->asoc->base.sk);
+
+ error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
if (error)
return error;
}
@@ -2267,7 +2398,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
sctp_frag_point(asoc, params->spp_pathmtu);
@@ -2283,8 +2414,8 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
trans->param_flags =
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
- sctp_transport_pmtu(trans);
- sctp_assoc_sync_pmtu(asoc);
+ sctp_transport_pmtu(trans, sctp_opt2sk(sp));
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
}
} else if (asoc) {
asoc->param_flags =
@@ -2355,7 +2486,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
int hb_change, pmtud_change, sackdelay_change;
if (optlen != sizeof(struct sctp_paddrparams))
- return - EINVAL;
+ return -EINVAL;
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2376,7 +2507,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
- if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans)
@@ -2420,6 +2551,16 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
return 0;
}
+static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
+{
+ return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
+}
+
+static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
+{
+ return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
+}
+
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
@@ -2471,8 +2612,11 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
- pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
- pr_warn("Use struct sctp_sack_info instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2481,7 +2625,7 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
else
params.sack_freq = 0;
} else
- return - EINVAL;
+ return -EINVAL;
/* Validate value parameter. */
if (params.sack_delay > 500)
@@ -2500,37 +2644,31 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
asoc->sackdelay =
msecs_to_jiffies(params.sack_delay);
asoc->param_flags =
- (asoc->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackdelay = params.sack_delay;
sp->param_flags =
- (sp->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(sp->param_flags);
}
}
if (params.sack_freq == 1) {
if (asoc) {
asoc->param_flags =
- (asoc->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_DISABLE;
+ sctp_spp_sackdelay_disable(asoc->param_flags);
} else {
sp->param_flags =
- (sp->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_DISABLE;
+ sctp_spp_sackdelay_disable(sp->param_flags);
}
} else if (params.sack_freq > 1) {
if (asoc) {
asoc->sackfreq = params.sack_freq;
asoc->param_flags =
- (asoc->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(asoc->param_flags);
} else {
sp->sackfreq = params.sack_freq;
sp->param_flags =
- (sp->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(sp->param_flags);
}
}
@@ -2542,18 +2680,15 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
trans->sackdelay =
msecs_to_jiffies(params.sack_delay);
trans->param_flags =
- (trans->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(trans->param_flags);
}
if (params.sack_freq == 1) {
trans->param_flags =
- (trans->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_DISABLE;
+ sctp_spp_sackdelay_disable(trans->param_flags);
} else if (params.sack_freq > 1) {
trans->sackfreq = params.sack_freq;
trans->param_flags =
- (trans->param_flags & ~SPP_SACKDELAY) |
- SPP_SACKDELAY_ENABLE;
+ sctp_spp_sackdelay_enable(trans->param_flags);
}
}
}
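
The ratelimited warning above steers callers toward struct sctp_sack_info. A sketch of the non-deprecated form, assuming the sack_assoc_id field name matches the uapi definition (sack_delay and sack_freq appear in this hunk); helper name and values are illustrative:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: the non-deprecated form of the delayed-SACK option,
 * using struct sctp_sack_info rather than the old sctp_assoc_value.
 */
static int set_delayed_sack(int sd, sctp_assoc_t id)
{
	struct sctp_sack_info sack = {
		.sack_assoc_id = id,
		.sack_delay    = 200,	/* ms; must be <= 500 per the check above */
		.sack_freq     = 2,	/* SACK every other packet */
	};

	return setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
			  &sack, sizeof(sack));
}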
@@ -2707,6 +2842,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
+ unsigned long rto_min, rto_max;
+ struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
@@ -2720,26 +2857,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
+ rto_max = rtoinfo.srto_max;
+ rto_min = rtoinfo.srto_min;
+
+ if (rto_max)
+ rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
+ else
+ rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
+
+ if (rto_min)
+ rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
+ else
+ rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
+
+ if (rto_min > rto_max)
+ return -EINVAL;
+
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
- if (rtoinfo.srto_max != 0)
- asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
- if (rtoinfo.srto_min != 0)
- asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
+ asoc->rto_max = rto_max;
+ asoc->rto_min = rto_min;
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
- struct sctp_sock *sp = sctp_sk(sk);
-
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
- if (rtoinfo.srto_max != 0)
- sp->rtoinfo.srto_max = rtoinfo.srto_max;
- if (rtoinfo.srto_min != 0)
- sp->rtoinfo.srto_min = rtoinfo.srto_min;
+ sp->rtoinfo.srto_max = rto_max;
+ sp->rtoinfo.srto_min = rto_min;
}
return 0;
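
The added validation rejects a combination whose effective srto_min would exceed srto_max, even when one of the two is inherited from the current settings. A sketch passing millisecond values, with zero meaning "keep the current value"; helper name and numbers are illustrative:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: tighten the RTO bounds (milliseconds, zero leaves a
 * field unchanged); the check added above rejects a minimum that would
 * end up larger than the maximum.
 */
static int set_rto_bounds(int sd, sctp_assoc_t id)
{
	struct sctp_rtoinfo rto = {
		.srto_assoc_id = id,
		.srto_initial  = 0,	/* leave unchanged */
		.srto_min      = 100,
		.srto_max      = 2000,
	};

	return setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
}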
@@ -2797,13 +2944,8 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig
asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
}
- if (assocparams.sasoc_cookie_life != 0) {
- asoc->cookie_life.tv_sec =
- assocparams.sasoc_cookie_life / 1000;
- asoc->cookie_life.tv_usec =
- (assocparams.sasoc_cookie_life % 1000)
- * 1000;
- }
+ if (assocparams.sasoc_cookie_life != 0)
+ asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
} else {
/* Set the values to the endpoint */
struct sctp_sock *sp = sctp_sk(sk);
@@ -2880,8 +3022,11 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
int val;
if (optlen == sizeof(int)) {
- pr_warn("Use of int in maxseg socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
@@ -2927,17 +3072,17 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
- struct sctp_endpoint *ep;
struct sctp_association *asoc = NULL;
struct sctp_setpeerprim prim;
struct sctp_chunk *chunk;
+ struct sctp_af *af;
int err;
sp = sctp_sk(sk);
- ep = sp->ep;
- if (!sctp_addip_enable)
+ if (!net->sctp.addip_enable)
return -EPERM;
if (optlen != sizeof(struct sctp_setpeerprim))
@@ -2959,6 +3104,13 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
if (!sctp_state(asoc, ESTABLISHED))
return -ENOTCONN;
+ af = sctp_get_af_specific(prim.sspp_addr.ss_family);
+ if (!af)
+ return -EINVAL;
+
+ if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
+ return -EADDRNOTAVAIL;
+
if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
return -EADDRNOTAVAIL;
@@ -2970,7 +3122,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
err = sctp_send_asconf(asoc, chunk);
- SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n");
+ pr_debug("%s: we set peer primary addr primitively\n", __func__);
return err;
}
@@ -3131,8 +3283,11 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
int assoc_id = 0;
if (optlen == sizeof(int)) {
- pr_warn("Use of int in max_burst socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option deprecated.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3167,9 +3322,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunk val;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authchunk))
@@ -3178,15 +3334,15 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
return -EFAULT;
switch (val.sauth_chunk) {
- case SCTP_CID_INIT:
- case SCTP_CID_INIT_ACK:
- case SCTP_CID_SHUTDOWN_COMPLETE:
- case SCTP_CID_AUTH:
- return -EINVAL;
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ case SCTP_CID_AUTH:
+ return -EINVAL;
}
/* add this chunk id to the endpoint */
- return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
+ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}
/*
@@ -3199,24 +3355,20 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_hmacalgo *hmacs;
u32 idents;
int err;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
- hmacs = kmalloc(optlen, GFP_KERNEL);
- if (!hmacs)
- return -ENOMEM;
-
- if (copy_from_user(hmacs, optval, optlen)) {
- err = -EFAULT;
- goto out;
- }
+ hmacs = memdup_user(optval, optlen);
+ if (IS_ERR(hmacs))
+ return PTR_ERR(hmacs);
idents = hmacs->shmac_num_idents;
if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
@@ -3225,7 +3377,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
goto out;
}
- err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
+ err = sctp_auth_ep_set_hmacs(ep, hmacs);
out:
kfree(hmacs);
return err;
@@ -3241,24 +3393,20 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
- authkey = kmalloc(optlen, GFP_KERNEL);
- if (!authkey)
- return -ENOMEM;
-
- if (copy_from_user(authkey, optval, optlen)) {
- ret = -EFAULT;
- goto out;
- }
+ authkey = memdup_user(optval, optlen);
+ if (IS_ERR(authkey))
+ return PTR_ERR(authkey);
if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
ret = -EINVAL;
@@ -3271,9 +3419,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
goto out;
}
- ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ ret = sctp_auth_set_key(ep, asoc, authkey);
out:
- kfree(authkey);
+ kzfree(authkey);
return ret;
}
@@ -3287,10 +3435,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
@@ -3302,8 +3451,7 @@ static int sctp_setsockopt_active_key(struct sock *sk,
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
- return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
- val.scact_keynumber);
+ return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
}
/*
@@ -3315,10 +3463,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid))
@@ -3330,11 +3479,100 @@ static int sctp_setsockopt_del_key(struct sock *sk,
if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
- return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
- val.scact_keynumber);
+ return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
}
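
Both SCTP_AUTH_ACTIVE_KEY and SCTP_AUTH_DELETE_KEY above take a struct sctp_authkeyid. A sketch that rotates to a new key number before deleting the old one, since the stack rejects deleting the key that is still active; helper name is illustrative:

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: make new_key the active AUTH key for an association and
 * then drop the previous one; both options use struct sctp_authkeyid.
 */
static int rotate_auth_key(int sd, sctp_assoc_t id, uint16_t new_key,
			   uint16_t old_key)
{
	struct sctp_authkeyid kid = {
		.scact_assoc_id  = id,
		.scact_keynumber = new_key,
	};

	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
		       &kid, sizeof(kid)) < 0)
		return -1;

	kid.scact_keynumber = old_key;
	return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY,
			  &kid, sizeof(kid));
}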
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ *
+ * This option will enable or disable the use of the automatic generation of
+ * ASCONF chunks to add and delete addresses to an existing association. Note
+ * that this option has two caveats namely: a) it only affects sockets that
+ * are bound to all addresses available to the SCTP stack, and b) the system
+ * administrator may have an overriding control that turns the ASCONF feature
+ * off no matter what setting the socket option may have.
+ * This option expects an integer boolean flag, where a non-zero value turns on
+ * the option, and a zero value turns off the option.
+ * Note. In this implementation, socket operation overrides default parameter
+ * being set by sysctl as well as FreeBSD implementation
+ */
+static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ unsigned int optlen)
+{
+ int val;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+ if (!sctp_is_ep_boundall(sk) && val)
+ return -EINVAL;
+ if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ return 0;
+
+ if (val == 0 && sp->do_auto_asconf) {
+ list_del(&sp->auto_asconf_list);
+ sp->do_auto_asconf = 0;
+ } else if (val && !sp->do_auto_asconf) {
+ list_add_tail(&sp->auto_asconf_list,
+ &sock_net(sk)->sctp.auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ }
+ return 0;
+}
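
Per the comment above, the option only applies to sockets bound to all local addresses and can be overridden by the administrator. A sketch of enabling it, assuming the header in use exports the SCTP_AUTO_ASCONF constant; helper name is illustrative:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: turn automatic ASCONF generation on for a socket that is
 * bound to all local addresses (the option is rejected otherwise, as
 * the -EINVAL check above shows).
 */
static int enable_auto_asconf(int sd)
{
	int on = 1;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on));
}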
+
+
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to alter the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (optlen < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
+ sizeof(struct sctp_paddrthlds)))
+ return -EFAULT;
+
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ if (val.spt_pathmaxrxt)
+ asoc->pathmaxrxt = val.spt_pathmaxrxt;
+ asoc->pf_retrans = val.spt_pathpfthld;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ return 0;
+}
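
The branch structure above means a wildcard spt_address applies the thresholds to every transport of the association named by spt_assoc_id. A sketch using the field names from this hunk, assuming struct sctp_paddrthlds and SCTP_PEER_ADDR_THLDS are exported to userspace; helper name and threshold value are illustrative:

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch only: lower the Potentially-Failed threshold on every transport
 * of an association by leaving spt_address zeroed (the wildcard), which
 * is the branch taken by sctp_is_any() above.
 */
static int set_pf_threshold(int sd, sctp_assoc_t id)
{
	struct sctp_paddrthlds thlds;

	memset(&thlds, 0, sizeof(thlds));	/* wildcard spt_address */
	thlds.spt_assoc_id   = id;
	thlds.spt_pathmaxrxt = 0;	/* 0 keeps the current Path.Max.Retrans */
	thlds.spt_pathpfthld = 2;	/* mark the path PF after 2 timeouts */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
			  &thlds, sizeof(thlds));
}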
/* API 6.2 setsockopt(), getsockopt()
*
@@ -3355,13 +3593,12 @@ static int sctp_setsockopt_del_key(struct sock *sk,
* optval - the buffer to store the value of the option.
* optlen - the size of the buffer.
*/
-SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen)
+static int sctp_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen)
{
int retval = 0;
- SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n",
- sk, optname);
+ pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
@@ -3375,7 +3612,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
goto out_nounlock;
}
- sctp_lock_sock(sk);
+ lock_sock(sk);
switch (optname) {
case SCTP_SOCKOPT_BINDX_ADD:
@@ -3420,7 +3657,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
@@ -3482,12 +3719,18 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTH_DELETE_KEY:
retval = sctp_setsockopt_del_key(sk, optval, optlen);
break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
+ break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
}
- sctp_release_sock(sk);
+ release_sock(sk);
out_nounlock:
return retval;
@@ -3509,16 +3752,16 @@ out_nounlock:
*
* len: the size of the address.
*/
-SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
- int addr_len)
+static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len)
{
int err = 0;
struct sctp_af *af;
- sctp_lock_sock(sk);
+ lock_sock(sk);
- SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n",
- __func__, sk, addr, addr_len);
+ pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
/* Validate addr_len before calling common connect/connectx routine. */
af = sctp_get_af_specific(addr->sa_family);
@@ -3531,12 +3774,12 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
}
- sctp_release_sock(sk);
+ release_sock(sk);
return err;
}
/* FIXME: Write comments. */
-SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
+static int sctp_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP; /* STUB */
}
@@ -3548,7 +3791,7 @@ SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
-SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
@@ -3557,7 +3800,7 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
long timeo;
int error = 0;
- sctp_lock_sock(sk);
+ lock_sock(sk);
sp = sctp_sk(sk);
ep = sp->ep;
@@ -3595,17 +3838,17 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
out:
- sctp_release_sock(sk);
+ release_sock(sk);
*err = error;
return newsk;
}
/* The SCTP ioctl handler. */
-SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int rc = -ENOTCONN;
- sctp_lock_sock(sk);
+ lock_sock(sk);
/*
* SEQPACKET-style sockets in LISTENING state are valid, for
@@ -3635,7 +3878,7 @@ SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
break;
}
out:
- sctp_release_sock(sk);
+ release_sock(sk);
return rc;
}
@@ -3643,12 +3886,12 @@ out:
* initialized the SCTP-specific portion of the sock.
* The sock structure should already be zero-filled memory.
*/
-SCTP_STATIC int sctp_init_sock(struct sock *sk)
+static int sctp_init_sock(struct sock *sk)
{
- struct sctp_endpoint *ep;
+ struct net *net = sock_net(sk);
struct sctp_sock *sp;
- SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
+ pr_debug("%s: sk:%p\n", __func__, sk);
sp = sctp_sk(sk);
@@ -3674,7 +3917,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->default_timetolive = 0;
sp->default_rcv_context = 0;
- sp->max_burst = sctp_max_burst;
+ sp->max_burst = net->sctp.max_burst;
+
+ sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
@@ -3682,24 +3927,24 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
*/
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
- sp->initmsg.sinit_max_attempts = sctp_max_retrans_init;
- sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
+ sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
+ sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
*/
- sp->rtoinfo.srto_initial = sctp_rto_initial;
- sp->rtoinfo.srto_max = sctp_rto_max;
- sp->rtoinfo.srto_min = sctp_rto_min;
+ sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+ sp->rtoinfo.srto_max = net->sctp.rto_max;
+ sp->rtoinfo.srto_min = net->sctp.rto_min;
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
*/
- sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
+ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
- sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
+ sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
@@ -3709,10 +3954,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
- sp->hbinterval = sctp_hb_interval;
- sp->pathmaxrxt = sctp_max_retrans_path;
- sp->pathmtu = 0; // allow default discovery
- sp->sackdelay = sctp_sack_timeout;
+ sp->hbinterval = net->sctp.hb_interval;
+ sp->pathmaxrxt = net->sctp.max_retrans_path;
+ sp->pathmtu = 0; /* allow default discovery */
+ sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
@@ -3752,39 +3997,67 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
*/
- ep = sctp_endpoint_new(sk, GFP_KERNEL);
- if (!ep)
+ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
+ if (!sp->ep)
return -ENOMEM;
- sp->ep = ep;
sp->hmac = NULL;
+ sk->sk_destruct = sctp_destruct_sock;
+
SCTP_DBG_OBJCNT_INC(sock);
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
+ if (net->sctp.default_auto_asconf) {
+ list_add_tail(&sp->auto_asconf_list,
+ &net->sctp.auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ } else
+ sp->do_auto_asconf = 0;
local_bh_enable();
return 0;
}
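Most of the per-socket defaults initialized here (now taken from the per-net sysctls instead of the old globals) are visible to applications through the matching socket options. A small sketch, assuming the lksctp-tools <netinet/sctp.h> definitions, that reads back the RTO defaults a freshly created socket inherited; an assoc_id of 0 addresses the endpoint-wide values:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Print the RTO defaults (milliseconds) inherited from net->sctp.rto_*. */
int print_rto_defaults(int fd)
{
	struct sctp_rtoinfo rto;
	socklen_t len = sizeof(rto);

	memset(&rto, 0, sizeof(rto));
	rto.srto_assoc_id = 0;	/* endpoint defaults, not a specific assoc */

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) < 0)
		return -1;

	printf("rto initial=%u max=%u min=%u (ms)\n",
	       rto.srto_initial, rto.srto_max, rto.srto_min);
	return 0;
}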
/* Cleanup any SCTP per socket resources. */
-SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
+static void sctp_destroy_sock(struct sock *sk)
{
- struct sctp_endpoint *ep;
+ struct sctp_sock *sp;
- SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
+ pr_debug("%s: sk:%p\n", __func__, sk);
/* Release our hold on the endpoint. */
- ep = sctp_sk(sk)->ep;
- sctp_endpoint_free(ep);
+ sp = sctp_sk(sk);
+ /* This could happen during socket init, thus we bail out
+ * early, since the rest of the below is not setup either.
+ */
+ if (sp->ep == NULL)
+ return;
+
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+ }
+ sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
+/* Triggered when there are no references on the socket anymore */
+static void sctp_destruct_sock(struct sock *sk)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ /* Free up the HMAC transform. */
+ crypto_free_hash(sp->hmac);
+
+ inet_sock_destruct(sk);
+}
+
/* API 4.1.7 shutdown() - TCP Style Syntax
* int shutdown(int socket, int how);
*
@@ -3801,8 +4074,9 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
* Disables further send and receive operations
* and initiates the SCTP shutdown sequence.
*/
-SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
+static void sctp_shutdown(struct sock *sk, int how)
{
+ struct net *net = sock_net(sk);
struct sctp_endpoint *ep;
struct sctp_association *asoc;
@@ -3814,7 +4088,7 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
if (!list_empty(&ep->asocs)) {
asoc = list_entry(ep->asocs.next,
struct sctp_association, asocs);
- sctp_primitive_SHUTDOWN(asoc, NULL);
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
}
}
}
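From userspace this path is reached through the regular shutdown() call; shutting down the send side is enough to start the SCTP SHUTDOWN handshake for the association at the head of the endpoint's list. A minimal sketch:

#include <sys/socket.h>

/* SHUT_WR (or SHUT_RDWR) triggers sctp_primitive_SHUTDOWN() for the
 * first association on this socket; further sends are disabled.
 */
static inline int sctp_graceful_shutdown(int fd)
{
	return shutdown(fd, SHUT_WR);
}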
@@ -3885,9 +4159,9 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
goto out;
}
- SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n",
- len, status.sstat_state, status.sstat_rwnd,
- status.sstat_assoc_id);
+ pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
+ __func__, len, status.sstat_state, status.sstat_rwnd,
+ status.sstat_assoc_id);
if (copy_to_user(optval, &status, len)) {
retval = -EFAULT;
@@ -3986,9 +4260,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len < sizeof(struct sctp_event_subscribe))
+ if (len <= 0)
return -EINVAL;
- len = sizeof(struct sctp_event_subscribe);
+ if (len > sizeof(struct sctp_event_subscribe))
+ len = sizeof(struct sctp_event_subscribe);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
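The hunk above relaxes the length check so that older, shorter layouts of struct sctp_event_subscribe can still be read back. A hedged sketch of subscribing to a couple of notifications and querying the setting, assuming <netinet/sctp.h>:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int subscribe_basic_events(int fd)
{
	struct sctp_event_subscribe ev;
	socklen_t len = sizeof(ev);

	memset(&ev, 0, sizeof(ev));
	ev.sctp_data_io_event = 1;	/* per-message SCTP_SNDRCV info */
	ev.sctp_shutdown_event = 1;	/* peer sent SHUTDOWN */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0)
		return -1;

	/* Reading back with a shorter buffer is accepted as long as the
	 * length is positive; the kernel copies at most sizeof(ev) bytes.
	 */
	return getsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &len);
}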
@@ -4023,14 +4298,16 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
}
/* Helper routine to branch off an association to a new socket. */
-SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
- struct socket **sockp)
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
- struct sock *sk = asoc->base.sk;
+ struct sctp_association *asoc = sctp_id2assoc(sk, id);
struct socket *sock;
struct sctp_af *af;
int err = 0;
+ if (!asoc)
+ return -EINVAL;
+
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
@@ -4059,13 +4336,14 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
return err;
}
+EXPORT_SYMBOL(sctp_do_peeloff);
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
sctp_peeloff_arg_t peeloff;
struct socket *newsock;
+ struct file *newfile;
int retval = 0;
- struct sctp_association *asoc;
if (len < sizeof(sctp_peeloff_arg_t))
return -EINVAL;
@@ -4073,35 +4351,40 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
- asoc = sctp_id2assoc(sk, peeloff.associd);
- if (!asoc) {
- retval = -EINVAL;
- goto out;
- }
-
- SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc);
-
- retval = sctp_do_peeloff(asoc, &newsock);
+ retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
if (retval < 0)
goto out;
/* Map the socket to an unused fd that can be returned to the user. */
- retval = sock_map_fd(newsock, 0);
+ retval = get_unused_fd_flags(0);
if (retval < 0) {
sock_release(newsock);
goto out;
}
- SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n",
- __func__, sk, asoc, newsock->sk, retval);
+ newfile = sock_alloc_file(newsock, 0, NULL);
+ if (unlikely(IS_ERR(newfile))) {
+ put_unused_fd(retval);
+ sock_release(newsock);
+ return PTR_ERR(newfile);
+ }
+
+ pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
+ retval);
/* Return the fd mapped to the new socket. */
+ if (put_user(len, optlen)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
+ }
peeloff.sd = retval;
- if (put_user(len, optlen))
+ if (copy_to_user(optval, &peeloff, len)) {
+ fput(newfile);
+ put_unused_fd(retval);
return -EFAULT;
- if (copy_to_user(optval, &peeloff, len))
- retval = -EFAULT;
-
+ }
+ fd_install(retval, newfile);
out:
return retval;
}
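Userspace reaches sctp_do_peeloff() through the SCTP_SOCKOPT_PEELOFF getsockopt (lksctp-tools also wraps it as sctp_peeloff()). A minimal sketch of the raw call, assuming the sctp_peeloff_arg_t layout from <netinet/sctp.h>:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Branch one association off a one-to-many socket; returns the new
 * one-to-one descriptor installed by fd_install() above, or -1.
 */
int peel_off_assoc(int fd, sctp_assoc_t assoc_id)
{
	sctp_peeloff_arg_t peeloff;
	socklen_t len = sizeof(peeloff);

	peeloff.associd = assoc_id;
	peeloff.sd = 0;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
		       &peeloff, &len) < 0)
		return -1;

	return peeloff.sd;	/* filled in by the kernel before the copy back */
}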
@@ -4216,11 +4499,11 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
- if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans) {
- SCTP_DEBUG_PRINTK("Failed no transport\n");
+ pr_debug("%s: failed no transport\n", __func__);
return -EINVAL;
}
}
@@ -4231,7 +4514,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
- SCTP_DEBUG_PRINTK("Failed no association\n");
+ pr_debug("%s: failed no association\n", __func__);
return -EINVAL;
}
@@ -4322,12 +4605,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
- pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
- pr_warn("Use struct sctp_sack_info instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
- return - EINVAL;
+ return -EINVAL;
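The struct sctp_assoc_value form is now only accepted with the ratelimited deprecation warning above; new code is expected to use struct sctp_sack_info. A sketch of the preferred form, assuming <netinet/sctp.h>:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Query delayed-SACK settings for one association (or the socket
 * defaults when assoc_id is 0 on a one-to-many socket).
 */
int get_delayed_sack(int fd, sctp_assoc_t assoc_id,
		     unsigned int *delay_ms, unsigned int *freq)
{
	struct sctp_sack_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	info.sack_assoc_id = assoc_id;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &info, &len) < 0)
		return -1;

	*delay_ms = info.sack_delay;
	*freq = info.sack_freq;
	return 0;
}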
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
@@ -4417,8 +4703,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
if (!asoc)
return -EINVAL;
- to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - offsetof(struct sctp_getaddrs,addrs);
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
list_for_each_entry(from, &asoc->peer.transport_addr_list,
transports) {
@@ -4450,9 +4736,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
union sctp_addr temp;
int cnt = 0;
int addrlen;
+ struct net *net = sock_net(sk);
rcu_read_lock();
- list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+ list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
if (!addr->valid)
continue;
@@ -4477,7 +4764,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
memcpy(to, &temp, addrlen);
to += addrlen;
- cnt ++;
+ cnt++;
space_left -= addrlen;
*bytes_copied += addrlen;
}
@@ -4526,8 +4813,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
bp = &asoc->base.bind_addr;
}
- to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - offsetof(struct sctp_getaddrs,addrs);
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
addrs = kmalloc(space_left, GFP_KERNEL);
if (!addrs)
@@ -4566,7 +4853,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
- cnt ++;
+ cnt++;
space_left -= addrlen;
}
@@ -4835,13 +5122,10 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
assocparams.sasoc_local_rwnd = asoc->a_rwnd;
- assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec
- * 1000) +
- (asoc->cookie_life.tv_usec
- / 1000);
+ assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
list_for_each(pos, &asoc->peer.transport_addr_list) {
- cnt ++;
+ cnt++;
}
assocparams.sasoc_number_peer_destinations = cnt;
@@ -4969,8 +5253,11 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- pr_warn("Use of int in maxseg socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5045,7 +5332,7 @@ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
if (copy_to_user(optval, &val, len))
return -EFAULT;
- return -ENOTSUPP;
+ return 0;
}
/*
@@ -5061,8 +5348,11 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- pr_warn("Use of int in max_burst socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
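Both SCTP_MAXSEG and SCTP_MAX_BURST now warn (ratelimited) when passed a bare int; struct sctp_assoc_value is the supported form. A short sketch for SCTP_MAX_BURST, assuming <netinet/sctp.h>; SCTP_MAXSEG is queried the same way:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int get_max_burst(int fd, sctp_assoc_t assoc_id, unsigned int *burst)
{
	struct sctp_assoc_value av;
	socklen_t len = sizeof(av);

	memset(&av, 0, sizeof(av));
	av.assoc_id = assoc_id;	/* 0 = socket default on UDP-style sockets */

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, &len) < 0)
		return -1;

	*burst = av.assoc_value;
	return 0;
}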
@@ -5096,15 +5386,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
- hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
+ hmacs = ep->auth_hmacs_list;
data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < sizeof(struct sctp_hmacalgo) + data_len)
@@ -5125,10 +5416,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
static int sctp_getsockopt_active_key(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkeyid val;
struct sctp_association *asoc;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authkeyid))
@@ -5143,7 +5435,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
if (asoc)
val.scact_keynumber = asoc->active_key_id;
else
- val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
+ val.scact_keynumber = ep->active_key_id;
len = sizeof(struct sctp_authkeyid);
if (put_user(len, optlen))
@@ -5157,6 +5449,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
@@ -5164,7 +5457,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
u32 num_chunks = 0;
char __user *to;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
@@ -5191,7 +5484,8 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
- if (put_user(len, optlen)) return -EFAULT;
+ if (put_user(len, optlen))
+ return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
@@ -5200,6 +5494,7 @@ num:
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authchunks __user *p = (void __user *)optval;
struct sctp_authchunks val;
struct sctp_association *asoc;
@@ -5207,7 +5502,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
u32 num_chunks = 0;
char __user *to;
- if (!sctp_auth_enable)
+ if (!ep->auth_enable)
return -EACCES;
if (len < sizeof(struct sctp_authchunks))
@@ -5222,9 +5517,9 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
return -EINVAL;
if (asoc)
- ch = (struct sctp_chunks_param*)asoc->c.auth_chunks;
+ ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
else
- ch = sctp_sk(sk)->ep->auth_chunk_list;
+ ch = ep->auth_chunk_list;
if (!ch)
goto num;
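The AUTH-related getters above now check the per-endpoint ep->auth_enable flag rather than the old global, so they return -EACCES unless SCTP authentication was enabled (net.sctp.auth_enable). A hedged sketch of querying the locally authenticated chunk list, assuming the struct sctp_authchunks layout from <netinet/sctp.h>:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int print_local_auth_chunks(int fd, sctp_assoc_t assoc_id)
{
	/* Room for the header plus a small chunk-type list. */
	char buf[sizeof(struct sctp_authchunks) + 64];
	struct sctp_authchunks *ac = (struct sctp_authchunks *)buf;
	socklen_t len = sizeof(buf);
	unsigned int i;

	memset(buf, 0, sizeof(buf));
	ac->gauth_assoc_id = assoc_id;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_LOCAL_AUTH_CHUNKS, ac, &len) < 0)
		return -1;	/* EACCES when SCTP AUTH is disabled */

	for (i = 0; i < ac->gauth_number_of_chunks; i++)
		printf("authenticated chunk type %u\n",
		       (unsigned int)ac->gauth_chunks[i]);
	return 0;
}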
@@ -5277,14 +5572,193 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
return 0;
}
-SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ * See the corresponding setsockopt entry as description
+ */
+static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val = 0;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
+ val = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
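The getter above reports 1 only when automatic ASCONF is on and the endpoint is bound to the wildcard address (sctp_is_ep_boundall()). A minimal sketch of toggling and reading the flag, assuming SCTP_AUTO_ASCONF is exposed by <netinet/sctp.h>:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Enable automatic ASCONF address management and read the flag back. */
int enable_auto_asconf(int fd)
{
	int on = 1, val = 0;
	socklen_t len = sizeof(val);

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on)) < 0)
		return -1;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &val, &len) < 0)
		return -1;

	return val;	/* 1 only for wildcard-bound sockets */
}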
+/*
+ * 8.2.6. Get the Current Identifiers of Associations
+ * (SCTP_GET_ASSOC_ID_LIST)
+ *
+ * This option gets the current list of SCTP association identifiers of
+ * the SCTP associations handled by a one-to-many style socket.
+ */
+static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_assoc_ids *ids;
+ u32 num = 0;
+
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ if (len < sizeof(struct sctp_assoc_ids))
+ return -EINVAL;
+
+ list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+ num++;
+ }
+
+ if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
+ return -EINVAL;
+
+ len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+
+ ids = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!ids))
+ return -ENOMEM;
+
+ ids->gaids_number_of_ids = num;
+ num = 0;
+ list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+ ids->gaids_assoc_id[num++] = asoc->assoc_id;
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
+ kfree(ids);
+ return -EFAULT;
+ }
+
+ kfree(ids);
+ return 0;
+}
+
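sctp_getsockopt_assoc_ids() above fails with -EINVAL when the supplied buffer cannot hold every identifier, so callers usually size the buffer for a generous upper bound. A hedged userspace sketch, assuming SCTP_GET_ASSOC_ID_LIST and struct sctp_assoc_ids from <netinet/sctp.h>:

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* List association IDs on a one-to-many socket, assuming at most 'max'
 * associations ('max' is a hypothetical bound chosen by the caller).
 */
int list_assoc_ids(int fd, unsigned int max)
{
	socklen_t len = sizeof(struct sctp_assoc_ids) +
			max * sizeof(sctp_assoc_t);
	struct sctp_assoc_ids *ids = malloc(len);
	unsigned int i;

	if (!ids)
		return -1;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
		       ids, &len) < 0) {
		free(ids);
		return -1;	/* EINVAL if 'max' was too small */
	}

	for (i = 0; i < ids->gaids_number_of_ids; i++)
		printf("assoc id %d\n", ids->gaids_assoc_id[i]);

	free(ids);
	return 0;
}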
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to fetch the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ int len,
+ int __user *optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ len = sizeof(struct sctp_paddrthlds);
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
+ return -EFAULT;
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+
+ val.spt_pathpfthld = asoc->pf_retrans;
+ val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ val.spt_pathmaxrxt = trans->pathmaxrxt;
+ val.spt_pathpfthld = trans->pf_retrans;
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
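The new getter above takes either a specific transport address or the wildcard address, in which case the association-level values are returned. A sketch of reading the thresholds for a whole association, assuming struct sctp_paddrthlds from <netinet/sctp.h>:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int get_pf_thresholds(int fd, sctp_assoc_t assoc_id,
		      unsigned int *pf_retrans, unsigned int *path_max_rxt)
{
	struct sctp_paddrthlds val;
	socklen_t len = sizeof(val);

	/* Leave spt_address zeroed (wildcard) to address the association
	 * as a whole rather than a single transport.
	 */
	memset(&val, 0, sizeof(val));
	val.spt_assoc_id = assoc_id;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &val, &len) < 0)
		return -1;

	*pf_retrans = val.spt_pathpfthld;
	*path_max_rxt = val.spt_pathmaxrxt;
	return 0;
}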
+/*
+ * SCTP_GET_ASSOC_STATS
+ *
+ * This option retrieves local per endpoint statistics. It is modeled
+ * after OpenSolaris' implementation
+ */
+static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_stats sas;
+ struct sctp_association *asoc = NULL;
+
+ /* User must provide at least the assoc id */
+ if (len < sizeof(sctp_assoc_t))
+ return -EINVAL;
+
+ /* Allow the struct to grow and fill in as much as possible */
+ len = min_t(size_t, len, sizeof(sas));
+
+ if (copy_from_user(&sas, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ sas.sas_rtxchunks = asoc->stats.rtxchunks;
+ sas.sas_gapcnt = asoc->stats.gapcnt;
+ sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
+ sas.sas_osacks = asoc->stats.osacks;
+ sas.sas_isacks = asoc->stats.isacks;
+ sas.sas_octrlchunks = asoc->stats.octrlchunks;
+ sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
+ sas.sas_oodchunks = asoc->stats.oodchunks;
+ sas.sas_iodchunks = asoc->stats.iodchunks;
+ sas.sas_ouodchunks = asoc->stats.ouodchunks;
+ sas.sas_iuodchunks = asoc->stats.iuodchunks;
+ sas.sas_idupchunks = asoc->stats.idupchunks;
+ sas.sas_opackets = asoc->stats.opackets;
+ sas.sas_ipackets = asoc->stats.ipackets;
+
+ /* New high max rto observed, will return 0 if not a single
+ * RTO update took place. obs_rto_ipaddr will be bogus
+ * in such a case
+ */
+ sas.sas_maxrto = asoc->stats.max_obs_rto;
+ memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
+ sizeof(struct sockaddr_storage));
+
+ /* Mark beginning of a new observation period */
+ asoc->stats.max_obs_rto = asoc->rto_min;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
+
+ if (copy_to_user(optval, &sas, len))
+ return -EFAULT;
+
+ return 0;
+}
+
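One caveat of the SCTP_GET_ASSOC_STATS getter above: reading it re-arms the max-RTO observation window (max_obs_rto is reset to rto_min), so it is meant to be polled periodically. A minimal sketch, assuming struct sctp_assoc_stats from <netinet/sctp.h>:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int dump_assoc_stats(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_stats sas;
	socklen_t len = sizeof(sas);

	memset(&sas, 0, sizeof(sas));
	sas.sas_assoc_id = assoc_id;	/* only the id must be filled in */

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &sas, &len) < 0)
		return -1;

	printf("assoc %d: max observed rto %llu, rtx chunks %llu, packets out %llu\n",
	       sas.sas_assoc_id,
	       (unsigned long long)sas.sas_maxrto,
	       (unsigned long long)sas.sas_rtxchunks,
	       (unsigned long long)sas.sas_opackets);
	return 0;
}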
+static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
{
int retval = 0;
int len;
- SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p... optname: %d)\n",
- sk, optname);
+ pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
/* I can hardly begin to describe how wrong this is. This is
* so broken as to be worse than useless. The API draft
@@ -5302,7 +5776,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- sctp_lock_sock(sk);
+ lock_sock(sk);
switch (optname) {
case SCTP_STATUS:
@@ -5325,7 +5799,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_getsockopt_delayed_ack(sk, len, optval,
optlen);
break;
@@ -5409,12 +5883,24 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_GET_ASSOC_NUMBER:
retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
break;
+ case SCTP_GET_ASSOC_ID_LIST:
+ retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
+ break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
+ break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
+ break;
+ case SCTP_GET_ASSOC_STATS:
+ retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
}
- sctp_release_sock(sk);
+ release_sock(sk);
return retval;
}
@@ -5441,45 +5927,47 @@ static void sctp_unhash(struct sock *sk)
* a fastreuse flag (FIXME: NPI ipg).
*/
static struct sctp_bind_bucket *sctp_bucket_create(
- struct sctp_bind_hashbucket *head, unsigned short snum);
+ struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
- struct sctp_bind_bucket *pp; /* hash list port iterator */
- struct hlist_node *node;
+ struct sctp_bind_bucket *pp;
unsigned short snum;
int ret;
snum = ntohs(addr->v4.sin_port);
- SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum);
- sctp_local_bh_disable();
+ pr_debug("%s: begins, snum:%d\n", __func__, snum);
+
+ local_bh_disable();
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
unsigned int rover;
+ struct net *net = sock_net(sk);
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
+ rover = prandom_u32() % remaining + low;
do {
rover++;
if ((rover < low) || (rover > high))
rover = low;
- if (inet_is_reserved_local_port(rover))
+ if (inet_is_local_reserved_port(net, rover))
continue;
- index = sctp_phashfn(rover);
+ index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
- sctp_spin_lock(&head->lock);
- sctp_for_each_hentry(pp, node, &head->chain)
- if (pp->port == rover)
+ spin_lock(&head->lock);
+ sctp_for_each_hentry(pp, &head->chain)
+ if ((pp->port == rover) &&
+ net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
- sctp_spin_unlock(&head->lock);
+ spin_unlock(&head->lock);
} while (--remaining > 0);
/* Exhausted local port range during search? */
@@ -5499,10 +5987,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* to the port number (snum) - we detect that with the
* port iterator, pp being NULL.
*/
- head = &sctp_port_hashtable[sctp_phashfn(snum)];
- sctp_spin_lock(&head->lock);
- sctp_for_each_hentry(pp, node, &head->chain) {
- if (pp->port == snum)
+ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
+ spin_lock(&head->lock);
+ sctp_for_each_hentry(pp, &head->chain) {
+ if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
}
}
@@ -5518,7 +6006,8 @@ pp_found:
int reuse = sk->sk_reuse;
struct sock *sk2;
- SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
+ pr_debug("%s: found a possible match\n", __func__);
+
if (pp->fastreuse && sk->sk_reuse &&
sk->sk_state != SCTP_SS_LISTENING)
goto success;
@@ -5533,7 +6022,7 @@ pp_found:
* that this port/socket (sk) combination are already
* in an endpoint.
*/
- sk_for_each_bound(sk2, node, &pp->owner) {
+ sk_for_each_bound(sk2, &pp->owner) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
@@ -5548,12 +6037,13 @@ pp_found:
goto fail_unlock;
}
}
- SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n");
+
+ pr_debug("%s: found a match\n", __func__);
}
pp_not_found:
/* If there was a hash table miss, create a new port. */
ret = 1;
- if (!pp && !(pp = sctp_bucket_create(head, snum)))
+ if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
@@ -5582,10 +6072,10 @@ success:
ret = 0;
fail_unlock:
- sctp_spin_unlock(&head->lock);
+ spin_unlock(&head->lock);
fail:
- sctp_local_bh_enable();
+ local_bh_enable();
return ret;
}
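The ephemeral-port search above picks a random starting point in the local port range and walks it with wrap-around, skipping reserved ports, under the bind-hash bucket lock. A simplified, standalone illustration of that rover loop; is_port_reserved() and is_port_in_use() are hypothetical stand-ins for inet_is_local_reserved_port() and the sctp_port_hashtable lookup:

#include <stdbool.h>
#include <stdlib.h>

bool is_port_reserved(unsigned short port);	/* hypothetical */
bool is_port_in_use(unsigned short port);	/* hypothetical */

/* Return a free port in [low, high], or 0 if the range is exhausted. */
unsigned short pick_ephemeral_port(unsigned short low, unsigned short high)
{
	unsigned int remaining = (unsigned int)(high - low) + 1;
	unsigned short rover = low + rand() % remaining;

	do {
		rover++;
		if (rover < low || rover > high)
			rover = low;	/* wrap around to the bottom */
		if (is_port_reserved(rover))
			continue;
		if (!is_port_in_use(rover))
			return rover;
	} while (--remaining > 0);

	return 0;	/* local port range exhausted */
}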
@@ -5594,7 +6084,6 @@ fail:
*/
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
- long ret;
union sctp_addr addr;
struct sctp_af *af = sctp_sk(sk)->pf->af;
@@ -5603,28 +6092,26 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */
- ret = sctp_get_port_local(sk, &addr);
-
- return ret ? 1 : 0;
+ return !!sctp_get_port_local(sk, &addr);
}
/*
* Move a socket to LISTENING state.
*/
-SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
+static int sctp_listen_start(struct sock *sk, int backlog)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct crypto_hash *tfm = NULL;
+ char alg[32];
/* Allocate HMAC for generating cookie. */
- if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
- tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ if (!sp->hmac && sp->sctp_hmac_alg) {
+ sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
+ tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
- if (net_ratelimit()) {
- pr_info("failed to load transform for %s: %ld\n",
- sctp_hmac_alg, PTR_ERR(tfm));
- }
+ net_info_ratelimited("failed to load transform for %s: %ld\n",
+ sp->sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
@@ -5680,7 +6167,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
if (unlikely(backlog < 0))
return err;
- sctp_lock_sock(sk);
+ lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
@@ -5713,7 +6200,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
err = 0;
out:
- sctp_release_sock(sk);
+ release_sock(sk);
return err;
}
@@ -5749,7 +6236,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Is there any exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -5787,7 +6275,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
********************************************************************/
static struct sctp_bind_bucket *sctp_bucket_create(
- struct sctp_bind_hashbucket *head, unsigned short snum)
+ struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
struct sctp_bind_bucket *pp;
@@ -5797,6 +6285,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
pp->port = snum;
pp->fastreuse = 0;
INIT_HLIST_HEAD(&pp->owner);
+ pp->net = net;
hlist_add_head(&pp->node, &head->chain);
}
return pp;
@@ -5816,23 +6305,24 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
static inline void __sctp_put_port(struct sock *sk)
{
struct sctp_bind_hashbucket *head =
- &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
+ &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+ inet_sk(sk)->inet_num)];
struct sctp_bind_bucket *pp;
- sctp_spin_lock(&head->lock);
+ spin_lock(&head->lock);
pp = sctp_sk(sk)->bind_hash;
__sk_del_bind_node(sk);
sctp_sk(sk)->bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
sctp_bucket_destroy(pp);
- sctp_spin_unlock(&head->lock);
+ spin_unlock(&head->lock);
}
void sctp_put_port(struct sock *sk)
{
- sctp_local_bh_disable();
+ local_bh_disable();
__sctp_put_port(sk);
- sctp_local_bh_enable();
+ local_bh_enable();
}
/*
@@ -5895,8 +6385,7 @@ static int sctp_autobind(struct sock *sk)
* msg_control
* points here
*/
-SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
- sctp_cmsgs_t *cmsgs)
+static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
{
struct cmsghdr *cmsg;
struct msghdr *my_msg = (struct msghdr *)msg;
@@ -5971,7 +6460,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
* Note: This function is the same function as in core/datagram.c
* with a few modifications to make lksctp work.
*/
-static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
int error;
DEFINE_WAIT(wait);
@@ -6008,9 +6497,9 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
* does not fit in the user's buffer, but this seems to be the
* only way to honor MSG_DONTWAIT realistically.
*/
- sctp_release_sock(sk);
+ release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
- sctp_lock_sock(sk);
+ lock_sock(sk);
ready:
finish_wait(sk_sleep(sk), &wait);
@@ -6038,8 +6527,8 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
timeo = sock_rcvtimeo(sk, noblock);
- SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n",
- timeo, MAX_SCHEDULE_TIMEOUT);
+ pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
+ MAX_SCHEDULE_TIMEOUT);
do {
/* Again only user level code calls this function,
@@ -6070,6 +6559,10 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
+ if (sk_can_busy_loop(sk) &&
+ sk_busy_loop(sk, noblock))
+ continue;
+
/* User doesn't want to wait. */
error = -EAGAIN;
if (!timeo)
@@ -6094,21 +6587,62 @@ static void __sctp_write_space(struct sctp_association *asoc)
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ wait_queue_head_t *wq = sk_sleep(sk);
+
+ if (wq && waitqueue_active(wq))
+ wake_up_interruptible(wq);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->wq->fasync_list &&
- !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
}
}
}
+static void sctp_wake_up_waiters(struct sock *sk,
+ struct sctp_association *asoc)
+{
+ struct sctp_association *tmp = asoc;
+
+ /* We do accounting for the sndbuf space per association,
+ * so we only need to wake our own association.
+ */
+ if (asoc->ep->sndbuf_policy)
+ return __sctp_write_space(asoc);
+
+ /* If association goes down and is just flushing its
+ * outq, then just normally notify others.
+ */
+ if (asoc->base.dead)
+ return sctp_write_space(sk);
+
+ /* Accounting for the sndbuf space is per socket, so we
+ * need to wake up others, try to be fair and in case of
+ * other associations, let them have a go first instead
+ * of just doing a sctp_write_space() call.
+ *
+ * Note that we reach sctp_wake_up_waiters() only when
+ * associations free up queued chunks, thus we are under
+ * lock and the list of associations on a socket is
+ * guaranteed not to change.
+ */
+ for (tmp = list_next_entry(tmp, asocs); 1;
+ tmp = list_next_entry(tmp, asocs)) {
+ /* Manually skip the head element. */
+ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
+ continue;
+ /* Wake up association. */
+ __sctp_write_space(tmp);
+ /* We've reached the end. */
+ if (tmp == asoc)
+ break;
+ }
+}
+
/* Do accounting for the sndbuf space.
* Decrement the used sndbuf space of the corresponding association by the
* data size which was just transmitted(freed).
@@ -6136,7 +6670,7 @@ static void sctp_wfree(struct sk_buff *skb)
sk_mem_uncharge(sk, skb->truesize);
sock_wfree(skb);
- __sctp_write_space(asoc);
+ sctp_wake_up_waiters(sk, asoc);
sctp_association_put(asoc);
}
@@ -6169,8 +6703,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
- SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
- asoc, (long)(*timeo_p), msg_len);
+ pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+ *timeo_p, msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
@@ -6192,10 +6726,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
- sctp_release_sock(sk);
+ release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
BUG_ON(sk != asoc->base.sk);
- sctp_lock_sock(sk);
+ lock_sock(sk);
*timeo_p = current_timeo;
}
@@ -6221,7 +6755,7 @@ do_nonblock:
goto out;
}
-void sctp_data_ready(struct sock *sk, int len)
+void sctp_data_ready(struct sock *sk)
{
struct socket_wq *wq;
@@ -6276,8 +6810,7 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
- SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc,
- (long)(*timeo_p));
+ pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
@@ -6301,9 +6834,9 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
- sctp_release_sock(sk);
+ release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
- sctp_lock_sock(sk);
+ lock_sock(sk);
*timeo_p = current_timeo;
}
@@ -6346,9 +6879,9 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
- sctp_release_sock(sk);
+ release_sock(sk);
timeo = schedule_timeout(timeo);
- sctp_lock_sock(sk);
+ lock_sock(sk);
}
err = -EINVAL;
@@ -6381,9 +6914,9 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
- sctp_release_sock(sk);
+ release_sock(sk);
timeout = schedule_timeout(timeout);
- sctp_lock_sock(sk);
+ lock_sock(sk);
} while (!signal_pending(current) && timeout);
finish_wait(sk_sleep(sk), &wait);
@@ -6413,11 +6946,12 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
newsk->sk_flags = sk->sk_flags;
- newsk->sk_no_check = sk->sk_no_check;
+ newsk->sk_no_check_tx = sk->sk_no_check_tx;
+ newsk->sk_no_check_rx = sk->sk_no_check_rx;
newsk->sk_reuse = sk->sk_reuse;
newsk->sk_shutdown = sk->sk_shutdown;
- newsk->sk_destruct = inet_sock_destruct;
+ newsk->sk_destruct = sctp_destruct_sock;
newsk->sk_family = sk->sk_family;
newsk->sk_protocol = IPPROTO_SCTP;
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -6460,6 +6994,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
+ struct list_head tmplist;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
@@ -6467,7 +7002,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
- inet_sk_copy_descendant(newsk, oldsk);
+ if (oldsp->do_auto_asconf) {
+ memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+ inet_sk_copy_descendant(newsk, oldsk);
+ memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+ } else
+ inet_sk_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
@@ -6476,15 +7016,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
- head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
- sctp_local_bh_disable();
- sctp_spin_lock(&head->lock);
+ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+ inet_sk(oldsk)->inet_num)];
+ local_bh_disable();
+ spin_lock(&head->lock);
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
- sctp_spin_unlock(&head->lock);
- sctp_local_bh_enable();
+ spin_unlock(&head->lock);
+ local_bh_enable();
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
@@ -6573,7 +7114,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
- sctp_release_sock(newsk);
+ release_sock(newsk);
}
@@ -6608,7 +7149,7 @@ struct proto sctp_prot = {
.sockets_allocated = &sctp_sockets_allocated,
};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct proto sctpv6_prot = {
.name = "SCTPv6",
@@ -6639,4 +7180,4 @@ struct proto sctpv6_prot = {
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */