Diffstat (limited to 'net')
32 files changed, 224 insertions, 168 deletions
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 2e341de3e76..901eff7ebe7 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int cmtp_process_transmit(struct cmtp_session *session)
+static void cmtp_process_transmit(struct cmtp_session *session)
 {
 	struct sk_buff *skb, *nskb;
 	unsigned char *hdr;
@@ -223,7 +223,7 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 
 	if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
 		BT_ERR("Can't allocate memory for new frame");
-		return -ENOMEM;
+		return;
 	}
 
 	while ((skb = skb_dequeue(&session->transmit))) {
@@ -275,8 +275,6 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 
 	cmtp_send_frame(session, nskb->data, nskb->len);
 
 	kfree_skb(nskb);
-
-	return skb_queue_len(&session->transmit);
 }
 
 static int cmtp_session(void *arg)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index affbc55462e..de8af5f4239 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_transmit(struct hidp_session *session)
 {
 	struct sk_buff *skb;
 
@@ -453,9 +453,6 @@ static int hidp_process_transmit(struct hidp_session *session)
 		hidp_set_timer(session);
 		kfree_skb(skb);
 	}
-
-	return skb_queue_len(&session->ctrl_transmit) +
-				skb_queue_len(&session->intr_transmit);
 }
 
 static int hidp_session(void *arg)
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index f3f6355a278..63a123c5c41 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -590,8 +590,11 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
-				signal_pending(current) || !timeo)
+		if (!skb_queue_empty(&sk->sk_receive_queue) ||
+		    sk->sk_err ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+		    signal_pending(current) ||
+		    !timeo)
 			break;
 
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d689200bcf..6304590fd36 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 
 	BT_DBG("tty %p dev %p", tty, dev);
 
-	if (skb_queue_len(&dlc->tx_queue))
+	if (!skb_queue_empty(&dlc->tx_queue))
 		return dlc->mtu;
 
 	return 0;
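The Bluetooth hunks above bundle two related cleanups: binary skb_queue_len() tests become skb_queue_empty(), and cmtp_process_transmit()/hidp_process_transmit() turn void because their only callers ignored the returned queue length. A minimal sketch of why the two emptiness tests are interchangeable; field names follow the 2.6-era struct sk_buff_head, but this is an illustration, not the verbatim kernel source:

/*
 * Illustrative sketch of the two helpers: a sk_buff_head is a circular
 * doubly linked list carrying its own length counter.
 */
struct sk_buff_head_sketch {
	struct sk_buff	*next;	/* first buffer, or the head itself */
	struct sk_buff	*prev;	/* last buffer, or the head itself */
	__u32		qlen;	/* maintained under the queue spinlock */
};

static inline __u32 sketch_queue_len(const struct sk_buff_head_sketch *list)
{
	return list->qlen;	/* exact count; overkill for "is it empty?" */
}

static inline int sketch_queue_empty(const struct sk_buff_head_sketch *list)
{
	/* an empty ring points back at its own head */
	return list->next == (struct sk_buff *)list;
}

Neither read is safer than the other without holding the queue lock, so the conversion changes no locking rules; it simply states that only emptiness matters at these call sites.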
diff --git a/net/core/dev.c b/net/core/dev.c
index 7f5f62c6511..ff9dc029233 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 extern void skb_release_data(struct sk_buff *);
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, int gfp_mask)
+int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	unsigned int size;
 	u8 *data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 733deee24b9..d9f7b06fe88 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -182,7 +182,8 @@ nodata:
  *	%GFP_ATOMIC.
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-			     unsigned int size, int gfp_mask)
+				     unsigned int size,
+				     unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
  *	%GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 
@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
@@ -557,7 +558,8 @@ out:
  *	reloaded after call to this function.
  */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+		     unsigned int __nocast gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom, int newtailroom, int gfp_mask)
+				int newheadroom, int newtailroom,
+				unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
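The int gfp_mask -> unsigned int __nocast gfp_mask conversions in dev.c and skbuff.c (and in sock.c and tcp_output.c further down) are static-checking annotations, not behavior changes. A hedged sketch of the mechanism, modeled on the 2.6-era include/linux/compiler.h; __CHECKER__ is defined only when the code is run through sparse:

/* Trimmed illustration of the compiler.h pattern, not the exact header. */
#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))	/* sparse-only attribute */
#else
# define __nocast				/* expands to nothing for gcc */
#endif

/*
 * With the parameter annotated, sparse warns when a value is implicitly
 * cast to it from a different integer type, catching swapped or bogus
 * gfp arguments that gcc would accept silently.
 */
struct sk_buff *example_alloc_skb(unsigned int size,
				  unsigned int __nocast gfp_mask);

These annotations were a stepping stone: later kernels replaced them with the gfp_t typedef and __bitwise, which sparse checks more strictly.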
diff --git a/net/core/sock.c b/net/core/sock.c
index a6ec3ada7f9..8b35ccdc2b3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -622,7 +622,8 @@ lenout:
  *	@prot: struct proto associated with this new sock instance
  *	@zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, unsigned int __nocast priority,
+		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
 	kmem_cache_t *slab = prot->slab;
@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
  * Allocate a skb from the socket's send buffer.
  */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a skb from the socket's receive buffer.
  */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 29bb3cd2196..96a02800cd2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk)
 	 * we are double checking that we are not sending too
 	 * many of these keepalive frames.
 	 */
-	if (skb_queue_len(&scp->other_xmit_queue) == 0)
+	if (skb_queue_empty(&scp->other_xmit_queue))
 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
 }
 
@@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
 	struct dn_scp *scp = DN_SK(sk);
 	int mask = datagram_poll(file, sock, wait);
 
-	if (skb_queue_len(&scp->other_receive_queue))
+	if (!skb_queue_empty(&scp->other_receive_queue))
 		mask |= POLLRDBAND;
 
 	return mask;
@@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 	case SIOCATMARK:
 		lock_sock(sk);
-		val = (skb_queue_len(&scp->other_receive_queue) != 0);
+		val = !skb_queue_empty(&scp->other_receive_queue);
 		if (scp->state != DN_RUN)
 			val = -ENOTCONN;
 		release_sock(sk);
@@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 	int len = 0;
 
 	if (flags & MSG_OOB)
-		return skb_queue_len(q) ? 1 : 0;
+		return !skb_queue_empty(q) ? 1 : 0;
 
 	while(skb != (struct sk_buff *)q) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (sk->sk_err)
 		goto out;
 
-	if (skb_queue_len(&scp->other_receive_queue)) {
+	if (!skb_queue_empty(&scp->other_receive_queue)) {
 		if (!(flags & MSG_OOB)) {
 			msg->msg_flags |= MSG_OOB;
 			if (!scp->other_report) {
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 42abbf3f524..8cce1fdbda9 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -342,7 +342,8 @@ int dn_nsp_xmit_timeout(struct sock *sk)
 
 	dn_nsp_output(sk);
 
-	if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue))
+	if (!skb_queue_empty(&scp->data_xmit_queue) ||
+	    !skb_queue_empty(&scp->other_xmit_queue))
 		scp->persist = dn_nsp_persist(sk);
 
 	return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index cb759484979..279f57abfec 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -970,7 +970,8 @@ int icmp_rcv(struct sk_buff *skb)
 		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
 		 *	discarded if to broadcast/multicast.
 		 */
-		if (icmph->type == ICMP_ECHO &&
+		if ((icmph->type == ICMP_ECHO ||
+		     icmph->type == ICMP_TIMESTAMP) &&
 		    sysctl_icmp_echo_ignore_broadcasts) {
 			goto error;
 		}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1f3183168a9..5088f90835a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1615,9 +1615,10 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 {
 	int err;
 	u32 addr = imr->imr_multiaddr.s_addr;
-	struct ip_mc_socklist *iml, *i;
+	struct ip_mc_socklist *iml=NULL, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
+	int ifindex;
 	int count = 0;
 
 	if (!MULTICAST(addr))
@@ -1633,37 +1634,30 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 		goto done;
 	}
 
-	iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-
-	err = -EADDRINUSE;
+	ifindex = imr->imr_ifindex;
 	for (i = inet->mc_list; i; i = i->next) {
-		if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
-			/* New style additions are reference counted */
-			if (imr->imr_address.s_addr == 0) {
-				i->count++;
-				err = 0;
-			}
+		if (i->multi.imr_multiaddr.s_addr == addr &&
+		    i->multi.imr_ifindex == ifindex)
 			goto done;
-		}
 		count++;
 	}
 	err = -ENOBUFS;
-	if (iml == NULL || count >= sysctl_igmp_max_memberships)
+	if (count >= sysctl_igmp_max_memberships)
+		goto done;
+	iml = (struct ip_mc_socklist *)sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
+	if (iml == NULL)
 		goto done;
+
 	memcpy(&iml->multi, imr, sizeof(*imr));
 	iml->next = inet->mc_list;
-	iml->count = 1;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
 	inet->mc_list = iml;
 	ip_mc_inc_group(in_dev, addr);
-	iml = NULL;
 	err = 0;
-
 done:
 	rtnl_shunlock();
-	if (iml)
-		sock_kfree_s(sk, iml, sizeof(*iml));
 	return err;
 }
@@ -1693,30 +1687,25 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
+	struct in_device *in_dev;
+	u32 group = imr->imr_multiaddr.s_addr;
+	u32 ifindex;
 
 	rtnl_lock();
+	in_dev = ip_mc_find_dev(imr);
+	if (!in_dev) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-		if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
-		    iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
-		    (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) {
-			struct in_device *in_dev;
-
-			in_dev = inetdev_by_index(iml->multi.imr_ifindex);
-			if (in_dev)
-				(void) ip_mc_leave_src(sk, iml, in_dev);
-			if (--iml->count) {
-				rtnl_unlock();
-				if (in_dev)
-					in_dev_put(in_dev);
-				return 0;
-			}
+		if (iml->multi.imr_multiaddr.s_addr == group &&
+		    iml->multi.imr_ifindex == ifindex) {
+			(void) ip_mc_leave_src(sk, iml, in_dev);
 			*imlp = iml->next;
-			if (in_dev) {
-				ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
-				in_dev_put(in_dev);
-			}
+			ip_mc_dec_group(in_dev, group);
 			rtnl_unlock();
 			sock_kfree_s(sk, iml, sizeof(*iml));
 			return 0;
@@ -1736,6 +1725,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!MULTICAST(addr))
@@ -1755,15 +1745,20 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	err = -EADDRNOTAVAIL;
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
-		if (memcmp(&pmc->multi, mreqs, 2*sizeof(__u32)) == 0)
+		if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
+		    && pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	/* if a source filter was set, must be the same mode as before */
 	if (pmc->sflist) {
-		if (pmc->sfmode != omode)
+		if (pmc->sfmode != omode) {
+			err = -EINVAL;
 			goto done;
+		}
 	} else if (pmc->sfmode != omode) {
 		/* allow mode switches for empty-set filters */
 		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
@@ -1775,7 +1770,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	psl = pmc->sflist;
 	if (!add) {
 		if (!psl)
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 		rv = !0;
 		for (i=0; i<psl->sl_count; i++) {
 			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
@@ -1784,7 +1779,13 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 				break;
 		}
 		if (rv)	/* source not found */
+			goto done;	/* err = -EADDRNOTAVAIL */
+
+		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
+		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
+			leavegroup = 1;
 			goto done;
+		}
 
 		/* update the interface filter */
 		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
@@ -1842,18 +1843,21 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 			&mreqs->imr_sourceaddr, 1);
 done:
 	rtnl_shunlock();
+	if (leavegroup)
+		return ip_mc_leave_group(sk, &imr);
 	return err;
 }
 
 int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 {
-	int err;
+	int err = 0;
 	struct ip_mreqn imr;
 	u32 addr = msf->imsf_multiaddr;
 	struct ip_mc_socklist *pmc;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
+	int leavegroup = 0;
 
 	if (!MULTICAST(addr))
 		return -EINVAL;
@@ -1872,15 +1876,22 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 		err = -ENODEV;
 		goto done;
 	}
-	err = -EADDRNOTAVAIL;
+
+	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
+	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
+		leavegroup = 1;
+		goto done;
+	}
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	if (msf->imsf_numsrc) {
 		newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
 				IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL);
@@ -1909,8 +1920,11 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 			0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = msf->imsf_fmode;
+	err = 0;
 done:
 	rtnl_shunlock();
+	if (leavegroup)
+		err = ip_mc_leave_group(sk, &imr);
 	return err;
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f8b172f8981..fc7c481d0d7 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -677,11 +677,11 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 				mreq.imr_address.s_addr = mreqs.imr_interface;
 				mreq.imr_ifindex = 0;
 				err = ip_mc_join_group(sk, &mreq);
-				if (err)
+				if (err && err != -EADDRINUSE)
 					break;
 				omode = MCAST_INCLUDE;
 				add = 1;
-			} else /*IP_DROP_SOURCE_MEMBERSHIP */ {
+			} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
 				omode = MCAST_INCLUDE;
 				add = 0;
 			}
@@ -754,7 +754,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			mreq.imr_address.s_addr = 0;
 			mreq.imr_ifindex = greqs.gsr_interface;
 			err = ip_mc_join_group(sk, &mreq);
-			if (err)
+			if (err && err != -EADDRINUSE)
 				break;
 			greqs.gsr_interface = mreq.imr_ifindex;
 			omode = MCAST_INCLUDE;
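Taken together, the igmp.c and ip_sockglue.c changes move IPv4 multicast joins to RFC 3376 full-state semantics: a duplicate join now fails with -EADDRINUSE (which the source-group join paths above deliberately tolerate), and an INCLUDE-mode filter with an empty source list is treated as leaving the group outright. A hypothetical userspace sequence showing the (INCLUDE, empty) == LEAVE_GROUP rule through the RFC 3678 full-state API; sock is assumed to be a UDP socket that already joined the group, and error handling is omitted:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/*
 * Hypothetical illustration: installing an empty INCLUDE filter now
 * drops the membership itself, so no separate drop request is needed.
 */
static void leave_via_empty_filter(int sock, const struct sockaddr_in *group)
{
	struct group_filter gf;

	memset(&gf, 0, sizeof(gf));
	gf.gf_interface = 0;			/* let the kernel pick */
	memcpy(&gf.gf_group, group, sizeof(*group));
	gf.gf_fmode = MCAST_INCLUDE;		/* INCLUDE mode ... */
	gf.gf_numsrc = 0;			/* ... with no sources */

	/* With this patch the kernel maps this to a full group leave
	 * (the new leavegroup path above) instead of leaving behind a
	 * membership with an empty filter. */
	setsockopt(sock, IPPROTO_IP, MCAST_MSFILTER, &gf, sizeof(gf));
}

The net/ipv6/mcast.c hunks later in this diff add the same special case to ip6_mc_msfilter(), so the rule holds for both address families.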
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 29894c74916..ddb6ce4ecff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			 * is not empty. It is more elegant, but eats cycles,
 			 * unfortunately.
 			 */
-			if (skb_queue_len(&tp->ucopy.prequeue))
+			if (!skb_queue_empty(&tp->ucopy.prequeue))
 				goto do_prequeue;
 
 			/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			}
 
 			if (tp->rcv_nxt == tp->copied_seq &&
-			    skb_queue_len(&tp->ucopy.prequeue)) {
+			    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 				tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 
 			tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8de2f1071c2..53a8a5399f1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
 
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
 
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
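The statistics hunks in tcp.c and tcp_input.c (and the tcp_timer.c one below) also change what the MIB counters mean: the old calls added the current queue length to the counter, so its value scaled with how full a queue happened to be, while the new calls count one event per occurrence. An illustrative sketch, not the kernel's SNMP machinery:

/* Plain counter for illustration; the kernel uses per-CPU SNMP tables. */
static unsigned long ofo_pruned;

/* Old style: grows by the number of queued buffers, mixing "how often
 * did we prune" with "how much was queued at the time". */
static void count_prune_old(unsigned int qlen)
{
	ofo_pruned += qlen;
}

/* New style: one increment per prune event. The queue length no longer
 * needs to be read at all, which is what lets these call sites switch
 * to skb_queue_empty(). */
static void count_prune_new(void)
{
	ofo_pruned++;
}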
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e041d057ec8..e3f8ea1bfa9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
 * was unread data in the receive queue.  This behavior is recommended
 * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
 */
-void tcp_send_active_reset(struct sock *sk, int priority)
+void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b127b449856..0084227438c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
 
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 562fcd14fde..29fed6e58d0 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -281,7 +281,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
 	}
 	write_unlock_bh(&ipv6_sk_mc_lock);
 
-	return -ENOENT;
+	return -EADDRNOTAVAIL;
 }
 
 static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex)
@@ -386,12 +386,16 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 		if (ipv6_addr_equal(&pmc->addr, group))
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	/* if a source filter was set, must be the same mode as before */
 	if (pmc->sflist) {
-		if (pmc->sfmode != omode)
+		if (pmc->sfmode != omode) {
+			err = -EINVAL;
 			goto done;
+		}
 	} else if (pmc->sfmode != omode) {
 		/* allow mode switches for empty-set filters */
 		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
@@ -402,7 +406,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 	psl = pmc->sflist;
 	if (!add) {
 		if (!psl)
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 		rv = !0;
 		for (i=0; i<psl->sl_count; i++) {
 			rv = memcmp(&psl->sl_addr[i], source,
@@ -411,7 +415,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 				break;
 		}
 		if (rv)	/* source not found */
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
 		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
 			leavegroup = 1;
@@ -488,6 +492,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	struct inet6_dev *idev;
 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 	struct ip6_sf_socklist *newpsl, *psl;
+	int leavegroup = 0;
 	int i, err;
 
 	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
@@ -503,7 +508,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	if (!idev)
 		return -ENODEV;
 	dev = idev->dev;
-	err = -EADDRNOTAVAIL;
+
+	err = 0;
+	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
+		leavegroup = 1;
+		goto done;
+	}
 
 	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 		if (pmc->ifindex != gsf->gf_interface)
@@ -511,8 +521,10 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 		if (ipv6_addr_equal(&pmc->addr, group))
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	if (gsf->gf_numsrc) {
 		newpsl = (struct ip6_sf_socklist *)sock_kmalloc(sk,
 				IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC);
@@ -544,10 +556,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = gsf->gf_fmode;
+	err = 0;
 done:
 	read_unlock_bh(&idev->lock);
 	in6_dev_put(idev);
 	dev_put(dev);
+	if (leavegroup)
+		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 	return err;
 }
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 046ad0750e4..7029618f571 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -445,9 +445,8 @@ void irlap_disconnect_request(struct irlap_cb *self)
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
 	/* Don't disconnect until all data frames are successfully sent */
-	if (skb_queue_len(&self->txq) > 0) {
+	if (!skb_queue_empty(&self->txq)) {
 		self->disconnect_pending = TRUE;
-
 		return;
 	}
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 1cd89f5f3b7..a505b545760 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
 	 * Send out the RR frames faster if our own transmit queue is empty, or
 	 * if the peer is busy. The effect is a much faster conversation
 	 */
-	if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) {
+	if (skb_queue_empty(&self->txq) || self->remote_busy) {
 		if (self->fast_RR == TRUE) {
 			/*
 			 *  Assert that the fast poll timer has not reached the
@@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
 			IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__,
 				   skb_queue_len(&self->txq));
 
-			if (skb_queue_len(&self->txq)) {
+			if (!skb_queue_empty(&self->txq)) {
 				/* Prevent race conditions with irlap_data_request() */
 				self->local_busy = TRUE;
 
@@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
 #else	/* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
 #endif	/* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 *  Send data with poll bit cleared only if window > 1
@@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
 #else	/* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
#endif	/* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 *  Send data with final bit cleared only if window > 1
@@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			irlap_data_indication(self, skb, FALSE);
 
 			/* Any pending data requests?  */
-			if ((skb_queue_len(&self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0)) {
 				self->ack_required = TRUE;
 
@@ -2038,7 +2038,7 @@ static