Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--  net/iucv/af_iucv.c  |  95
1 file changed, 47 insertions(+), 48 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a991d8..7a95fa4a3de 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
-#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN (TRGCLS_SIZE)
-
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
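
The six CB_* macros removed above hand-computed byte offsets into skb->cb. Throughout the rest of this diff they are replaced by IUCV_SKB_CB(), which overlays a small struct on the control buffer so every access is typed, and the new offset field used by the recvmsg rework below gets a natural home. A minimal sketch of that accessor, inferred from the fields the hunks below rely on (class, tag, offset); the actual definition lives elsewhere in af_iucv.c and is not part of these hunks:

    struct iucv_skb_cb {
            u32 class;      /* target class of message */
            u32 tag;        /* tag associated with message */
            u32 offset;     /* read offset for partial recvmsg */
    };

    /* 12 bytes, comfortably within the 48-byte skb->cb area */
    #define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))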
@@ -156,14 +150,13 @@ static int afiucv_pm_freeze(struct device *dev)
{
struct iucv_sock *iucv;
struct sock *sk;
- struct hlist_node *node;
int err = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
switch (sk->sk_state) {
case IUCV_DISCONN:
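
Every sk_for_each() hunk in this diff drops the separate struct hlist_node cursor: the hlist iteration macros now derive their position from the entry itself, so the extra variable is dead weight. Only the call signature changes; the traversal and the iucv_sk_list.lock requirement are untouched. Shape of the change (handle() is a hypothetical placeholder for the per-socket work):

    struct sock *sk;

    /* old: sk_for_each(sk, node, &iucv_sk_list.head), with a
     *      struct hlist_node *node declared alongside         */
    sk_for_each(sk, &iucv_sk_list.head)
            handle(sk);     /* per-socket work goes here */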
@@ -194,13 +187,12 @@ static int afiucv_pm_freeze(struct device *dev)
static int afiucv_pm_restore_thaw(struct device *dev)
{
struct sock *sk;
- struct hlist_node *node;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
switch (sk->sk_state) {
case IUCV_CONNECTED:
sk->sk_err = EPIPE;
@@ -390,9 +382,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
return sk;
@@ -691,6 +682,18 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
return NULL;
}
+static void __iucv_auto_name(struct iucv_sock *iucv)
+{
+ char name[12];
+
+ sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+ atomic_inc_return(&iucv_sk_list.autobind_name));
+ }
+ memcpy(iucv->src_name, name, 8);
+}
+
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
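
__iucv_auto_name() factors out the name-probing loop that previously lived in iucv_sock_autobind(): bump the global autobind counter, render it as eight hex digits, and retry until __iucv_get_sock_by_name() finds no collision. Because the uniqueness check and the assignment must stay atomic with respect to other binds, callers are expected to hold iucv_sk_list.lock, as the reworked autobind path further down does:

    write_lock_bh(&iucv_sk_list.lock);
    __iucv_auto_name(iucv);         /* pick and claim a free "%08x" name */
    write_unlock_bh(&iucv_sk_list.lock);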
@@ -733,8 +736,12 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if (!memcmp(dev->perm_addr, uid, 8)) {
- memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+ /* Check for uninitialized siucv_name */
+ if (strncmp(sa->siucv_name, "        ", 8) == 0)
+ __iucv_auto_name(iucv);
+ else
+ memcpy(iucv->src_name, sa->siucv_name, 8);
sk->sk_bound_dev_if = dev->ifindex;
iucv->hs_dev = dev;
dev_hold(dev);
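
On the HiperSockets bind path, a siucv_name of eight blanks now means "no name supplied" and triggers auto-naming instead of literally binding to blanks. From userspace that looks roughly like the sketch below; it assumes the usual AF_IUCV userspace header declaring struct sockaddr_iucv, and "LNXUSER" is an illustrative user id:

    /* hypothetical helper: bind fd, letting the kernel pick the app name */
    static int bind_auto_named(int fd)
    {
            struct sockaddr_iucv sa;

            memset(&sa, 0, sizeof(sa));
            sa.siucv_family = AF_IUCV;
            memset(sa.siucv_user_id, ' ', 8);
            memcpy(sa.siucv_user_id, "LNXUSER", 7);  /* blank-padded id */
            memset(sa.siucv_name, ' ', 8);  /* eight blanks: auto-assign */
            return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
    }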
@@ -772,7 +779,6 @@ done:
static int iucv_sock_autobind(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
- char name[12];
int err = 0;
if (unlikely(!pr_iucv))
@@ -781,17 +787,9 @@ static int iucv_sock_autobind(struct sock *sk)
memcpy(iucv->src_user_id, iucv_userid, 8);
write_lock_bh(&iucv_sk_list.lock);
-
- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
- while (__iucv_get_sock_by_name(name)) {
- sprintf(name, "%08x",
- atomic_inc_return(&iucv_sk_list.autobind_name));
- }
-
+ __iucv_auto_name(iucv);
write_unlock_bh(&iucv_sk_list.lock);
- memcpy(&iucv->src_name, name, 8);
-
if (!iucv->msglimit)
iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
@@ -1144,7 +1142,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
- memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ IUCV_SKB_CB(skb)->tag = txmsg.tag;
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
@@ -1227,7 +1225,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
return -ENOMEM;
/* copy target class to control buffer of new skb */
- memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+ IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
/* copy data fragment */
memcpy(nskb->data, skb->data + copied, size);
@@ -1259,7 +1257,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
/* store msg target class in the second 4 bytes of skb ctrl buffer */
/* Note: the first 4 bytes are reserved for msg tag */
- memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+ IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1295,6 +1293,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
}
}
+ IUCV_SKB_CB(skb)->offset = 0;
if (sock_queue_rcv_skb(sk, skb))
skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
@@ -1330,6 +1329,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
+ u32 offset;
if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1349,13 +1349,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
- rlen = skb->len; /* real length of skb */
+ offset = IUCV_SKB_CB(skb)->offset;
+ rlen = skb->len - offset; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
if (!rlen)
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
- if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+ if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@@ -1373,7 +1374,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
- CB_TRGCLS_LEN, CB_TRGCLS(skb));
+ sizeof(IUCV_SKB_CB(skb)->class),
+ (void *)&IUCV_SKB_CB(skb)->class);
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1385,8 +1387,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
- skb_pull(skb, copied);
- if (skb->len) {
+ if (copied < rlen) {
+ IUCV_SKB_CB(skb)->offset = offset + copied;
skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
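
Taken together, the recvmsg hunks replace skb_pull()-based consumption with a read cursor kept in the control block: skb->data and skb->len stay untouched, and a partially read skb is re-queued with only its offset advanced. A condensed view of the resulting read path (error handling, MSG_PEEK, and the backlog refill from the surrounding code omitted):

    offset = IUCV_SKB_CB(skb)->offset;
    rlen   = skb->len - offset;               /* unread bytes in this skb */
    copied = min_t(unsigned int, rlen, len);
    skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);

    if (sk->sk_type == SOCK_STREAM && copied < rlen) {
            /* partial read: advance the cursor, keep the skb queued */
            IUCV_SKB_CB(skb)->offset = offset + copied;
            skb_queue_head(&sk->sk_receive_queue, skb);
    }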
@@ -1406,6 +1408,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
+ IUCV_SKB_CB(rskb)->offset = 0;
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv->backlog_skb_q,
rskb);
@@ -1464,7 +1467,8 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
return iucv_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP;
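
The poll change follows the core-networking convention: when userspace has enabled SO_SELECT_ERR_QUEUE (which sets SOCK_SELECT_ERR_QUEUE), a pending error queue is reported as POLLPRI in addition to POLLERR, so select()/poll() callers can wait on error-queue events. A userspace sketch of the opt-in:

    int on = 1;
    struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };

    /* opt in: error-queue readiness is also signalled as POLLPRI */
    setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE, &on, sizeof(on));
    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
            ;       /* drain with recvmsg(fd, ..., MSG_ERRQUEUE) */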
@@ -1678,7 +1682,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
unsigned char user_data[16];
unsigned char nuser_data[16];
unsigned char src_name[8];
- struct hlist_node *node;
struct sock *sk, *nsk;
struct iucv_sock *iucv, *niucv;
int err;
@@ -1689,7 +1692,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
read_lock(&iucv_sk_list.lock);
iucv = NULL;
sk = NULL;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (sk->sk_state == IUCV_LISTEN &&
!memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
/*
@@ -1761,7 +1764,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* Wake up accept */
nsk->sk_state = IUCV_CONNECTED;
- sk->sk_data_ready(sk, 1);
+ sk->sk_data_ready(sk);
err = 0;
fail:
bh_unlock_sock(sk);
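
Both sk_data_ready() call sites lose their trailing argument, tracking the tree-wide removal of the byte-count parameter from this callback; readers derive what is available from the receive queue, so the count carried no information:

    /* old prototype: void (*sk_data_ready)(struct sock *sk, int bytes); */
    /* new prototype: void (*sk_data_ready)(struct sock *sk);            */
    sk->sk_data_ready(sk);      /* wake readers; they inspect the queue */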
@@ -1834,7 +1837,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
- if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
this = list_skb;
break;
}
@@ -1940,11 +1943,10 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
sk_acceptq_is_full(sk) ||
!nsk) {
/* error on server socket - connection refused */
- if (nsk)
- sk_free(nsk);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
err = dev_queue_xmit(skb);
+ iucv_sock_kill(nsk);
bh_unlock_sock(sk);
goto out;
}
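
In the refusal path, the early sk_free(nsk) is replaced by an iucv_sock_kill(nsk) placed after the reply has been handed to the stack, so a never-accepted child is torn down through the regular AF_IUCV kill path instead of being freed directly. Annotated sequence as rearranged above (note that !nsk is one of the refusal conditions, so this relies on iucv_sock_kill() coping with a NULL socket):

    afiucv_swap_src_dest(skb);              /* reuse the SYN skb as reply  */
    trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; /* refusal     */
    err = dev_queue_xmit(skb);              /* consumes the skb            */
    iucv_sock_kill(nsk);                    /* dispose of unused child     */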
@@ -1972,7 +1974,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
if (!err) {
iucv_accept_enqueue(sk, nsk);
nsk->sk_state = IUCV_CONNECTED;
- sk->sk_data_ready(sk, 1);
+ sk->sk_data_ready(sk);
} else
iucv_sock_kill(nsk);
bh_unlock_sock(sk);
@@ -2095,6 +2097,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
+ IUCV_SKB_CB(skb)->offset = 0;
spin_lock(&iucv->message_q.lock);
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (sock_queue_rcv_skb(sk, skb)) {
@@ -2115,7 +2118,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- struct hlist_node *node;
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
@@ -2132,7 +2134,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
iucv = NULL;
sk = NULL;
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
if ((!memcmp(&iucv_sk(sk)->src_name,
trans_hdr->destAppName, 8)) &&
@@ -2200,8 +2202,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
/* fall through and receive zero length data */
case 0:
/* plain data frame */
- memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
- CB_TRGCLS_LEN);
+ IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
@@ -2225,10 +2226,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
struct sk_buff *list_skb;
struct sk_buff *nskb;
unsigned long flags;
- struct hlist_node *node;
read_lock_irqsave(&iucv_sk_list.lock, flags);
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (sk == isk) {
iucv = iucv_sk(sk);
break;
@@ -2298,15 +2298,14 @@ out_unlock:
static int afiucv_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct net_device *event_dev = (struct net_device *)ptr;
- struct hlist_node *node;
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
struct sock *sk;
struct iucv_sock *iucv;
switch (event) {
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
if ((iucv->hs_dev == event_dev) &&
(sk->sk_state == IUCV_CONNECTED)) {
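
Finally, netdevice notifiers no longer receive a bare struct net_device pointer: ptr now carries a struct netdev_notifier_info, and netdev_notifier_info_to_dev() unwraps the device from it. A minimal standalone notifier using the new accessor (example_netdev_event and its message are illustrative; afiucv_netdev_event above is the real handler):

    static int example_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
            /* ptr points to a struct netdev_notifier_info; unwrap it */
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_GOING_DOWN)
                    pr_info("%s is going down\n", dev->name);
            return NOTIFY_DONE;
    }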