Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/Kconfig   |   14
-rw-r--r--  net/iucv/af_iucv.c | 1167
-rw-r--r--  net/iucv/iucv.c    |  253
3 files changed, 1064 insertions, 370 deletions
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39..497fbe732de 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
config IUCV
- tristate "IUCV support (S390 - z/VM only)"
depends on S390
+ def_tristate y if S390
+ prompt "IUCV support (S390 - z/VM only)"
help
Select this option if you want to use inter-user communication
under VM or VIF. If you run on z/VM, say "Y" to enable a fast
communication link between VM guests.
config AFIUCV
- tristate "AF_IUCV support (S390 - z/VM only)"
- depends on IUCV
+ depends on S390
+ def_tristate m if QETH_L3 || IUCV
+ prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
help
- Select this option if you want to use inter-user communication under
- VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
- communication link between VM guests.
+ Select this option if you want to use AF_IUCV socket applications
+ based on z/VM inter-user communication vehicle or based on
+ HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9637e45744f..7a95fa4a3de 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
#include <asm/cpcmd.h>
#include <linux/kmod.h>
-#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
-#define VERSION "1.1"
+#define VERSION "1.2"
static char iucv_userid[80];
@@ -42,18 +41,14 @@ static struct proto iucv_proto = {
.obj_size = sizeof(struct iucv_sock),
};
+static struct iucv_interface *pr_iucv;
+
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
-#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN (TRGCLS_SIZE)
-
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
@@ -89,6 +84,13 @@ do { \
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
+static void iucv_sever_path(struct sock *, int);
+
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags);
+static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
@@ -148,27 +150,19 @@ static int afiucv_pm_freeze(struct device *dev)
{
struct iucv_sock *iucv;
struct sock *sk;
- struct hlist_node *node;
int err = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- skb_queue_purge(&iucv->send_skb_q);
- skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
- case IUCV_SEVERED:
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CONNECTED:
- if (iucv->path) {
- err = iucv_path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ iucv_sever_path(sk, 0);
break;
case IUCV_OPEN:
case IUCV_BOUND:
@@ -177,6 +171,8 @@ static int afiucv_pm_freeze(struct device *dev)
default:
break;
}
+ skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->backlog_skb_q);
}
read_unlock(&iucv_sk_list.lock);
return err;
@@ -190,16 +186,13 @@ static int afiucv_pm_freeze(struct device *dev)
*/
static int afiucv_pm_restore_thaw(struct device *dev)
{
- struct iucv_sock *iucv;
struct sock *sk;
- struct hlist_node *node;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
- iucv = iucv_sk(sk);
+ sk_for_each(sk, &iucv_sk_list.head) {
switch (sk->sk_state) {
case IUCV_CONNECTED:
sk->sk_err = EPIPE;
@@ -207,7 +200,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
sk->sk_state_change(sk);
break;
case IUCV_DISCONN:
- case IUCV_SEVERED:
case IUCV_CLOSING:
case IUCV_LISTEN:
case IUCV_BOUND:
@@ -231,7 +223,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
static struct device_driver af_iucv_driver = {
.owner = THIS_MODULE,
.name = "afiucv",
- .bus = &iucv_bus,
+ .bus = NULL,
.pm = &afiucv_pm_ops,
};
@@ -250,7 +242,7 @@ static struct device *af_iucv_dev;
* PRMDATA[0..6] socket data (max 7 bytes);
* PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
*
- * The socket data length is computed by substracting the socket data length
+ * The socket data length is computed by subtracting the socket data length
* value from 0xFF.
* If the socket data len is greater 7, then PRMDATA can be used for special
* notifications (see iucv_sock_shutdown); and further,
@@ -296,7 +288,11 @@ static inline int iucv_below_msglim(struct sock *sk)
if (sk->sk_state != IUCV_CONNECTED)
return 1;
- return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ if (iucv->transport == AF_IUCV_TRANS_IUCV)
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ else
+ return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
+ (atomic_read(&iucv->pendings) <= 0));
}
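
The predicate above carries the patch's dual flow control: the native IUCV transport throttles on the path's message limit, while the new HiperSockets transport counts in-flight messages against the peer's advertised window and also waits for delayed TX completions to drain. A standalone sketch, with illustrative types standing in for struct iucv_sock:

#include <stdatomic.h>
#include <stdbool.h>

enum transport { TRANS_IUCV, TRANS_HIPER };	/* AF_IUCV_TRANS_* stand-ins */

struct sock_sketch {
	enum transport transport;
	unsigned int send_q_len;	/* skb_queue_len(&iucv->send_skb_q) */
	unsigned int path_msglim;	/* iucv->path->msglim */
	atomic_int msg_sent;		/* sent but not yet confirmed */
	atomic_int pendings;		/* TX_NOTIFY_PENDING completions */
	unsigned int msglimit_peer;	/* window from the peer's SYN/SYN|ACK */
};

static bool below_msglim(struct sock_sketch *s)
{
	if (s->transport == TRANS_IUCV)
		return s->send_q_len < s->path_msglim;
	/* HiperSockets: stay under the peer window and let pending
	 * (delayed) completions drain before admitting new sends */
	return atomic_load(&s->msg_sent) < (int)s->msglimit_peer &&
	       atomic_load(&s->pendings) <= 0;
}

Credits flow back either through iucv_callback_txdone() (native IUCV) or through a WIN frame, whose handler later in this patch subtracts the peer-confirmed count from msg_sent.
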
/**
@@ -314,31 +310,80 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_unlock();
}
-/* Timers */
-static void iucv_sock_timeout(unsigned long arg)
-{
- struct sock *sk = (struct sock *)arg;
-
- bh_lock_sock(sk);
- sk->sk_err = ETIMEDOUT;
- sk->sk_state_change(sk);
- bh_unlock_sock(sk);
-
- iucv_sock_kill(sk);
- sock_put(sk);
-}
-
-static void iucv_sock_clear_timer(struct sock *sk)
+/**
+ * afiucv_hs_send() - send a message through HiperSockets transport
+ */
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags)
{
- sk_stop_timer(sk, &sk->sk_timer);
+ struct iucv_sock *iucv = iucv_sk(sock);
+ struct af_iucv_trans_hdr *phs_hdr;
+ struct sk_buff *nskb;
+ int err, confirm_recv = 0;
+
+ memset(skb->head, 0, ETH_HLEN);
+ phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
+ sizeof(struct af_iucv_trans_hdr));
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
+
+ phs_hdr->magic = ETH_P_AF_IUCV;
+ phs_hdr->version = 1;
+ phs_hdr->flags = flags;
+ if (flags == AF_IUCV_FLAG_SYN)
+ phs_hdr->window = iucv->msglimit;
+ else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
+ confirm_recv = atomic_read(&iucv->msg_recv);
+ phs_hdr->window = confirm_recv;
+ if (confirm_recv)
+ phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
+ }
+ memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
+ memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
+ memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
+ memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
+ ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
+ ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
+ ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
+ ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
+ if (imsg)
+ memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+ skb->dev = iucv->hs_dev;
+ if (!skb->dev)
+ return -ENODEV;
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
+ return -ENETDOWN;
+ if (skb->len > skb->dev->mtu) {
+ if (sock->sk_type == SOCK_SEQPACKET)
+ return -EMSGSIZE;
+ else
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ skb_queue_tail(&iucv->send_skb_q, nskb);
+ err = dev_queue_xmit(skb);
+ if (net_xmit_eval(err)) {
+ skb_unlink(nskb, &iucv->send_skb_q);
+ kfree_skb(nskb);
+ } else {
+ atomic_sub(confirm_recv, &iucv->msg_recv);
+ WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+ }
+ return net_xmit_eval(err);
}
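
afiucv_hs_send() assumes its callers (iucv_sock_sendmsg() and iucv_send_ctrl() further down) reserved ETH_HLEN plus sizeof(struct af_iucv_trans_hdr) of headroom, which it then pushes back in front of the payload. A minimal sketch of that reserve/push arithmetic, assuming an illustrative 48-byte transport header (the real struct is declared in af_iucv.h):

#include <assert.h>
#include <stddef.h>

#define ETH_HLEN   14
#define HS_HDR_LEN 48	/* assumed sizeof(struct af_iucv_trans_hdr) */

struct buf { unsigned char room[2048]; size_t data, tail; };

static void reserve(struct buf *b, size_t n) { b->data += n; b->tail += n; }

static unsigned char *push(struct buf *b, size_t n)
{
	assert(b->data >= n);	/* the kernel would hit skb_under_panic() */
	b->data -= n;
	return &b->room[b->data];
}

int main(void)
{
	struct buf b = { .data = 0, .tail = 0 };

	reserve(&b, HS_HDR_LEN + ETH_HLEN);	/* sendmsg side */
	/* payload lands in [b.data, b.tail) here */
	push(&b, HS_HDR_LEN);			/* transport header */
	push(&b, ETH_HLEN);			/* zeroed MAC header */
	assert(b.data == 0);			/* exactly the reserved room */
	return 0;
}

Note also that a clone of the frame is queued on send_skb_q before dev_queue_xmit(); the TX-notify handler near the end of this patch uses that clone to match completions back to the socket.
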
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
return sk;
@@ -348,7 +393,19 @@ static struct sock *__iucv_get_sock_by_name(char *nm)
static void iucv_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
+
+ sk_mem_reclaim(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_err("Attempt to release alive iucv socket %p\n", sk);
+ return;
+ }
+
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
}
/* Cleanup Listen */
@@ -376,15 +433,49 @@ static void iucv_sock_kill(struct sock *sk)
sock_put(sk);
}
+/* Terminate an IUCV path */
+static void iucv_sever_path(struct sock *sk, int with_user_data)
+{
+ unsigned char user_data[16];
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_path *path = iucv->path;
+
+ if (iucv->path) {
+ iucv->path = NULL;
+ if (with_user_data) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+ ASCEBC(user_data, sizeof(user_data));
+ pr_iucv->path_sever(path, user_data);
+ } else
+ pr_iucv->path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
+/* Send FIN through an IUCV socket for HIPER transport */
+static int iucv_send_ctrl(struct sock *sk, u8 flags)
+{
+ int err = 0;
+ int blen;
+ struct sk_buff *skb;
+
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb, flags);
+ }
+ return err;
+}
+
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
- unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
- int err;
unsigned long timeo;
+ int err = 0;
- iucv_sock_clear_timer(sk);
lock_sock(sk);
switch (sk->sk_state) {
@@ -393,18 +484,21 @@ static void iucv_sock_close(struct sock *sk)
break;
case IUCV_CONNECTED:
- case IUCV_DISCONN:
- err = 0;
-
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ case IUCV_DISCONN: /* fall through */
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
- if (!skb_queue_empty(&iucv->send_skb_q)) {
+ if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
- err = iucv_sock_wait(sk,
+ iucv_sock_wait(sk,
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
@@ -413,25 +507,20 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
- if (iucv->path) {
- low_nmcpy(user_data, iucv->src_name);
- high_nmcpy(user_data, iucv->dst_name);
- ASCEBC(user_data, sizeof(user_data));
- err = iucv_path_sever(iucv->path, user_data);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
-
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
- break;
- default:
- /* nothing to do here */
- break;
+ default: /* fall through */
+ iucv_sever_path(sk, 1);
+ }
+
+ if (iucv->hs_dev) {
+ dev_put(iucv->hs_dev);
+ iucv->hs_dev = NULL;
+ sk->sk_bound_dev_if = 0;
}
/* mark socket for deletion by iucv_sock_kill() */
@@ -449,23 +538,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
+ struct iucv_sock *iucv;
sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
if (!sk)
return NULL;
+ iucv = iucv_sk(sk);
sock_init_data(sock, sk);
- INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
- spin_lock_init(&iucv_sk(sk)->accept_q_lock);
- skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
- INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
- spin_lock_init(&iucv_sk(sk)->message_q.lock);
- skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
- iucv_sk(sk)->send_tag = 0;
- iucv_sk(sk)->flags = 0;
- iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
- iucv_sk(sk)->path = NULL;
- memset(&iucv_sk(sk)->src_user_id , 0, 32);
+ INIT_LIST_HEAD(&iucv->accept_q);
+ spin_lock_init(&iucv->accept_q_lock);
+ skb_queue_head_init(&iucv->send_skb_q);
+ INIT_LIST_HEAD(&iucv->message_q.list);
+ spin_lock_init(&iucv->message_q.lock);
+ skb_queue_head_init(&iucv->backlog_skb_q);
+ iucv->send_tag = 0;
+ atomic_set(&iucv->pendings, 0);
+ iucv->flags = 0;
+ iucv->msglimit = 0;
+ atomic_set(&iucv->msg_sent, 0);
+ atomic_set(&iucv->msg_recv, 0);
+ iucv->path = NULL;
+ iucv->sk_txnotify = afiucv_hs_callback_txnotify;
+ memset(&iucv->src_user_id, 0, 32);
+ if (pr_iucv)
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ else
+ iucv->transport = AF_IUCV_TRANS_HIPER;
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -476,8 +575,6 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
sk->sk_protocol = proto;
sk->sk_state = IUCV_OPEN;
- setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
-
iucv_sock_link(&iucv_sk_list, sk);
return sk;
}
@@ -570,16 +667,12 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == IUCV_CONNECTED ||
- sk->sk_state == IUCV_SEVERED ||
- sk->sk_state == IUCV_DISCONN || /* due to PM restore */
+ sk->sk_state == IUCV_DISCONN ||
!newsock) {
iucv_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
- if (sk->sk_state == IUCV_SEVERED)
- sk->sk_state = IUCV_DISCONN;
-
release_sock(sk);
return sk;
}
@@ -589,6 +682,18 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
return NULL;
}
+static void __iucv_auto_name(struct iucv_sock *iucv)
+{
+ char name[12];
+
+ sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+ atomic_inc_return(&iucv_sk_list.autobind_name));
+ }
+ memcpy(iucv->src_name, name, 8);
+}
+
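
__iucv_auto_name() factors out the generator that iucv_sock_autobind() used to inline: an 8-hex-digit name drawn from a global counter, retried until it no longer collides with an existing socket; the bind path below reuses it when siucv_name arrives as all blanks. A user-space sketch of the same loop, with name_in_use() as a hypothetical stand-in for __iucv_get_sock_by_name():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static atomic_uint autobind_name;

static bool name_in_use(const char *name)	/* hypothetical lookup */
{
	(void)name;
	return false;
}

static void auto_name(char dst[8])
{
	char name[12];

	snprintf(name, sizeof(name), "%08x",
		 atomic_fetch_add(&autobind_name, 1U) + 1U);
	while (name_in_use(name))
		snprintf(name, sizeof(name), "%08x",
			 atomic_fetch_add(&autobind_name, 1U) + 1U);
	memcpy(dst, name, 8);	/* 8 chars, no terminating NUL, as in the diff */
}
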
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
@@ -596,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
- int err;
+ int err = 0;
+ struct net_device *dev;
+ char uid[9];
/* Verify the input sockaddr */
if (!addr || addr->sa_family != AF_IUCV)
@@ -615,19 +722,51 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
err = -EADDRINUSE;
goto done_unlock;
}
- if (iucv->path) {
- err = 0;
+ if (iucv->path)
goto done_unlock;
- }
/* Bind the socket */
- memcpy(iucv->src_name, sa->siucv_name, 8);
-
- /* Copy the user id */
- memcpy(iucv->src_user_id, iucv_userid, 8);
- sk->sk_state = IUCV_BOUND;
- err = 0;
+ if (pr_iucv)
+ if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
+ goto vm_bind; /* VM IUCV transport */
+ /* try hiper transport */
+ memcpy(uid, sa->siucv_user_id, sizeof(uid));
+ ASCEBC(uid, 8);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ if (!memcmp(dev->perm_addr, uid, 8)) {
+ memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+ /* Check for uninitialized siucv_name */
+ if (strncmp(sa->siucv_name, " ", 8) == 0)
+ __iucv_auto_name(iucv);
+ else
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ sk->sk_bound_dev_if = dev->ifindex;
+ iucv->hs_dev = dev;
+ dev_hold(dev);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_HIPER;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
+ rcu_read_unlock();
+ goto done_unlock;
+ }
+ }
+ rcu_read_unlock();
+vm_bind:
+ if (pr_iucv) {
+ /* use local userid for backward compat */
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, iucv_userid, 8);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+ goto done_unlock;
+ }
+ /* found no dev to bind */
+ err = -ENODEV;
done_unlock:
/* Release the socket list lock */
write_unlock_bh(&iucv_sk_list.lock);
@@ -640,68 +779,35 @@ done:
static int iucv_sock_autobind(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
- char query_buffer[80];
- char name[12];
int err = 0;
- /* Set the userid and name */
- cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
- if (unlikely(err))
+ if (unlikely(!pr_iucv))
return -EPROTO;
- memcpy(iucv->src_user_id, query_buffer, 8);
+ memcpy(iucv->src_user_id, iucv_userid, 8);
write_lock_bh(&iucv_sk_list.lock);
-
- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
- while (__iucv_get_sock_by_name(name)) {
- sprintf(name, "%08x",
- atomic_inc_return(&iucv_sk_list.autobind_name));
- }
-
+ __iucv_auto_name(iucv);
write_unlock_bh(&iucv_sk_list.lock);
- memcpy(&iucv->src_name, name, 8);
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
return err;
}
-/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
- int alen, int flags)
+static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
- struct iucv_sock *iucv;
+ struct iucv_sock *iucv = iucv_sk(sk);
unsigned char user_data[16];
int err;
- if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
- return -EINVAL;
-
- if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
-
- if (sk->sk_state == IUCV_OPEN) {
- err = iucv_sock_autobind(sk);
- if (unlikely(err))
- return err;
- }
-
- lock_sock(sk);
-
- /* Set the destination information */
- memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
- memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
-
high_nmcpy(user_data, sa->siucv_name);
- low_nmcpy(user_data, iucv_sk(sk)->src_name);
+ low_nmcpy(user_data, iucv->src_name);
ASCEBC(user_data, sizeof(user_data));
- iucv = iucv_sk(sk);
/* Create path. */
iucv->path = iucv_path_alloc(iucv->msglimit,
IUCV_IPRMDATA, GFP_KERNEL);
@@ -709,8 +815,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ENOMEM;
goto done;
}
- err = iucv_path_connect(iucv->path, &af_iucv_handler,
- sa->siucv_user_id, NULL, user_data, sk);
+ err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
+ sa->siucv_user_id, NULL, user_data,
+ sk);
if (err) {
iucv_path_free(iucv->path);
iucv->path = NULL;
@@ -729,24 +836,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ECONNREFUSED;
break;
}
- goto done;
}
+done:
+ return err;
+}
- if (sk->sk_state != IUCV_CONNECTED) {
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int err;
+
+ if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+ return -EINVAL;
+
+ if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+ return -EBADFD;
+
+ if (sk->sk_state == IUCV_OPEN &&
+ iucv->transport == AF_IUCV_TRANS_HIPER)
+ return -EBADFD; /* explicit bind required */
+
+ if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
+ return -EINVAL;
+
+ if (sk->sk_state == IUCV_OPEN) {
+ err = iucv_sock_autobind(sk);
+ if (unlikely(err))
+ return err;
+ }
+
+ lock_sock(sk);
+
+ /* Set the destination information */
+ memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
+ memcpy(iucv->dst_name, sa->siucv_name, 8);
+
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
+ else
+ err = afiucv_path_connect(sock, addr);
+ if (err)
+ goto done;
+
+ if (sk->sk_state != IUCV_CONNECTED)
err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
IUCV_DISCONN),
sock_sndtimeo(sk, flags & O_NONBLOCK));
- }
- if (sk->sk_state == IUCV_DISCONN) {
+ if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
- }
- if (err) {
- iucv_path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
+ iucv_sever_path(sk, 0);
done:
release_sock(sk);
@@ -838,20 +983,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
{
struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
addr->sa_family = AF_IUCV;
*len = sizeof(struct sockaddr_iucv);
if (peer) {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
- memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->dst_name, 8);
} else {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
- memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->src_name, 8);
}
memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
- memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
+ memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
return 0;
}
@@ -876,7 +1022,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
memcpy(prmdata, (void *) skb->data, skb->len);
prmdata[7] = 0xff - (u8) skb->len;
- return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
+ return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
(void *) prmdata, 8);
}
@@ -965,9 +1111,18 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
- skb = sock_alloc_send_skb(sk, len, noblock, &err);
- if (!skb)
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb = sock_alloc_send_skb(sk,
+ len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
+ noblock, &err);
+ else
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (!skb) {
+ err = -ENOMEM;
goto out;
+ }
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto fail;
@@ -987,7 +1142,17 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
- memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ IUCV_SKB_CB(skb)->tag = txmsg.tag;
+
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_sent);
+ err = afiucv_hs_send(&txmsg, sk, skb, 0);
+ if (err) {
+ atomic_dec(&iucv->msg_sent);
+ goto fail;
+ }
+ goto release;
+ }
skb_queue_tail(&iucv->send_skb_q, skb);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -1004,13 +1169,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* this error should never happen since the
* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
+ pr_iucv->path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
} else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
(void *) skb->data, skb->len);
if (err) {
if (err == 3) {
@@ -1028,6 +1193,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto fail;
}
+release:
release_sock(sk);
return len;
@@ -1059,7 +1225,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
return -ENOMEM;
/* copy target class to control buffer of new skb */
- memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+ IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
/* copy data fragment */
memcpy(nskb->data, skb->data + copied, size);
@@ -1091,7 +1257,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
/* store msg target class in the second 4 bytes of skb ctrl buffer */
/* Note: the first 4 bytes are reserved for msg tag */
- memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+ IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1100,8 +1266,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
skb->len = 0;
}
} else {
- rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
- skb->data, len, NULL);
+ rc = pr_iucv->message_receive(path, msg,
+ msg->flags & IUCV_IPRMDATA,
+ skb->data, len, NULL);
if (rc) {
kfree_skb(skb);
return;
@@ -1115,7 +1282,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
kfree_skb(skb);
skb = NULL;
if (rc) {
- iucv_path_sever(path, NULL);
+ pr_iucv->path_sever(path, NULL);
return;
}
skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1126,6 +1293,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
}
}
+ IUCV_SKB_CB(skb)->offset = 0;
if (sock_queue_rcv_skb(sk, skb))
skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
@@ -1161,8 +1329,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
+ u32 offset;
- if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
skb_queue_empty(&sk->sk_receive_queue) &&
list_empty(&iucv->message_q.list))
@@ -1180,11 +1349,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
- rlen = skb->len; /* real length of skb */
+ offset = IUCV_SKB_CB(skb)->offset;
+ rlen = skb->len - offset; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
+ if (!rlen)
+ sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
- if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+ if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@@ -1202,7 +1374,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
- CB_TRGCLS_LEN, CB_TRGCLS(skb));
+ sizeof(IUCV_SKB_CB(skb)->class),
+ (void *)&IUCV_SKB_CB(skb)->class);
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1214,19 +1387,28 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
- skb_pull(skb, copied);
- if (skb->len) {
+ if (copied < rlen) {
+ IUCV_SKB_CB(skb)->offset = offset + copied;
skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
}
kfree_skb(skb);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_recv);
+ if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
+ WARN_ON(1);
+ iucv_sock_close(sk);
+ return -EFAULT;
+ }
+ }
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
+ IUCV_SKB_CB(rskb)->offset = 0;
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv->backlog_skb_q,
rskb);
@@ -1238,6 +1420,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
+ if (atomic_read(&iucv->msg_recv) >=
+ iucv->msglimit / 2) {
+ err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
+ if (err) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
}
spin_unlock_bh(&iucv->message_q.lock);
}
@@ -1277,7 +1467,8 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
return iucv_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP;
@@ -1292,10 +1483,10 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sk->sk_state == IUCV_CLOSED)
mask |= POLLHUP;
- if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+ if (sk->sk_state == IUCV_DISCONN)
mask |= POLLIN;
- if (sock_writeable(sk))
+ if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1317,43 +1508,47 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
+ case IUCV_LISTEN:
case IUCV_DISCONN:
case IUCV_CLOSING:
- case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
-
default:
- sk->sk_shutdown |= how;
break;
}
if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
- txmsg.class = 0;
- txmsg.tag = 0;
- err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
- (void *) iprm_shutdown, 8);
- if (err) {
- switch (err) {
- case 1:
- err = -ENOTCONN;
- break;
- case 2:
- err = -ECONNRESET;
- break;
- default:
- err = -ENOTCONN;
- break;
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ err = pr_iucv->message_send(iucv->path, &txmsg,
+ IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
+ if (err) {
+ switch (err) {
+ case 1:
+ err = -ENOTCONN;
+ break;
+ case 2:
+ err = -ECONNRESET;
+ break;
+ default:
+ err = -ENOTCONN;
+ break;
+ }
}
- }
+ } else
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
}
+ sk->sk_shutdown |= how;
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
- err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
- if (err)
- err = -ENOTCONN;
-
+ if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ err = pr_iucv->path_quiesce(iucv->path, NULL);
+ if (err)
+ err = -ENOTCONN;
+/* skb_queue_purge(&sk->sk_receive_queue); */
+ }
skb_queue_purge(&sk->sk_receive_queue);
}
@@ -1375,13 +1570,6 @@ static int iucv_sock_release(struct socket *sock)
iucv_sock_close(sk);
- /* Unregister with IUCV base support */
- if (iucv_sk(sk)->path) {
- iucv_path_sever(iucv_sk(sk)->path, NULL);
- iucv_path_free(iucv_sk(sk)->path);
- iucv_sk(sk)->path = NULL;
- }
-
sock_orphan(sk);
iucv_sock_kill(sk);
return err;
@@ -1443,7 +1631,8 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- int val, len;
+ unsigned int val;
+ int len;
if (level != SOL_IUCV)
return -ENOPROTOOPT;
@@ -1466,6 +1655,13 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
: iucv->msglimit; /* default */
release_sock(sk);
break;
+ case SO_MSGSIZE:
+ if (sk->sk_state == IUCV_OPEN)
+ return -EBADFD;
+ val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
+ sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
+ 0x7fffffff;
+ break;
default:
return -ENOPROTOOPT;
}
@@ -1486,7 +1682,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
unsigned char user_data[16];
unsigned char nuser_data[16];
unsigned char src_name[8];
- struct hlist_node *node;
struct sock *sk, *nsk;
struct iucv_sock *iucv, *niucv;
int err;
@@ -1497,7 +1692,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
read_lock(&iucv_sk_list.lock);
iucv = NULL;
sk = NULL;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (sk->sk_state == IUCV_LISTEN &&
!memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
/*
@@ -1519,14 +1714,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
if (sk->sk_state != IUCV_LISTEN) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
/* Check for backlog size */
if (sk_acceptq_is_full(sk)) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1534,7 +1729,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
if (!nsk) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1558,10 +1753,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* set message limit for path based on msglimit of accepting socket */
niucv->msglimit = iucv->msglimit;
path->msglim = iucv->msglimit;
- err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
- err = iucv_path_sever(path, user_data);
- iucv_path_free(path);
+ iucv_sever_path(nsk, 1);
iucv_sock_kill(nsk);
goto fail;
}
@@ -1570,7 +1764,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* Wake up accept */
nsk->sk_state = IUCV_CONNECTED;
- sk->sk_data_ready(sk, 1);
+ sk->sk_data_ready(sk);
err = 0;
fail:
bh_unlock_sock(sk);
@@ -1594,7 +1788,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
- iucv_message_reject(path, msg);
+ pr_iucv->message_reject(path, msg);
return;
}
@@ -1605,7 +1799,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
goto save_message;
len = atomic_read(&sk->sk_rmem_alloc);
- len += iucv_msg_length(msg) + sizeof(struct sk_buff);
+ len += SKB_TRUESIZE(iucv_msg_length(msg));
if (len > sk->sk_rcvbuf)
goto save_message;
@@ -1638,11 +1832,12 @@ static void iucv_callback_txdone(struct iucv_path *path,
struct sk_buff *list_skb = list->next;
unsigned long flags;
+ bh_lock_sock(sk);
if (!skb_queue_empty(list)) {
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
- if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
this = list_skb;
break;
}
@@ -1659,7 +1854,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
iucv_sock_wake_msglim(sk);
}
}
- BUG_ON(!this);
if (sk->sk_state == IUCV_CLOSING) {
if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1667,6 +1861,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
sk->sk_state_change(sk);
}
}
+ bh_unlock_sock(sk);
}
@@ -1674,12 +1869,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
- if (!list_empty(&iucv_sk(sk)->accept_q))
- sk->sk_state = IUCV_SEVERED;
- else
- sk->sk_state = IUCV_DISCONN;
+ if (sk->sk_state == IUCV_CLOSED)
+ return;
+
+ bh_lock_sock(sk);
+ iucv_sever_path(sk, 1);
+ sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
@@ -1697,6 +1895,439 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
+/***************** HiperSockets transport callbacks ********************/
+static void afiucv_swap_src_dest(struct sk_buff *skb)
+{
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+ char tmpID[8];
+ char tmpName[8];
+
+ ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ memcpy(tmpID, trans_hdr->srcUserID, 8);
+ memcpy(tmpName, trans_hdr->srcAppName, 8);
+ memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
+ memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
+ memcpy(trans_hdr->destUserID, tmpID, 8);
+ memcpy(trans_hdr->destAppName, tmpName, 8);
+ skb_push(skb, ETH_HLEN);
+ memset(skb->data, 0, ETH_HLEN);
+}
+
+/**
+ * afiucv_hs_callback_syn - react on received SYN
+ **/
+static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct sock *nsk;
+ struct iucv_sock *iucv, *niucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ int err;
+
+ iucv = iucv_sk(sk);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ if (!iucv) {
+ /* no sock - connection refused */
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ goto out;
+ }
+
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+ bh_lock_sock(sk);
+ if ((sk->sk_state != IUCV_LISTEN) ||
+ sk_acceptq_is_full(sk) ||
+ !nsk) {
+ /* error on server socket - connection refused */
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ iucv_sock_kill(nsk);
+ bh_unlock_sock(sk);
+ goto out;
+ }
+
+ niucv = iucv_sk(nsk);
+ iucv_sock_init(nsk, sk);
+ niucv->transport = AF_IUCV_TRANS_HIPER;
+ niucv->msglimit = iucv->msglimit;
+ if (!trans_hdr->window)
+ niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
+ else
+ niucv->msglimit_peer = trans_hdr->window;
+ memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
+ memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
+ memcpy(niucv->src_name, iucv->src_name, 8);
+ memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+ nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ niucv->hs_dev = iucv->hs_dev;
+ dev_hold(niucv->hs_dev);
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
+ trans_hdr->window = niucv->msglimit;
+ /* if receiver acks the xmit connection is established */
+ err = dev_queue_xmit(skb);
+ if (!err) {
+ iucv_accept_enqueue(sk, nsk);
+ nsk->sk_state = IUCV_CONNECTED;
+ sk->sk_data_ready(sk);
+ } else
+ iucv_sock_kill(nsk);
+ bh_unlock_sock(sk);
+
+out:
+ return NET_RX_SUCCESS;
+}
+
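
afiucv_hs_callback_syn() is the passive side of a small TCP-like handshake: refuse with SYN|FIN unless a listener with backlog room exists, otherwise allocate a child socket, advertise its msglimit as the window in a SYN|ACK, and treat the connection as established once that frame is transmitted. A condensed sketch with illustrative flag values (the real AF_IUCV_FLAG_* constants live in af_iucv.h):

#include <stdint.h>

#define F_SYN 0x01	/* illustrative, not the af_iucv.h values */
#define F_ACK 0x02
#define F_FIN 0x04

/* the listener's reply to an incoming SYN frame */
static uint8_t answer_syn(int listening, int backlog_full,
			  uint16_t my_msglimit, uint16_t *window)
{
	if (!listening || backlog_full)
		return F_SYN | F_FIN;	/* connection refused */
	*window = my_msglimit;		/* advertise receive window */
	return F_SYN | F_ACK;		/* accept; child -> IUCV_CONNECTED */
}

The active side mirrors this in afiucv_hs_callback_synack() below, which records the peer's window in msglimit_peer before moving to IUCV_CONNECTED.
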
+/**
+ * afiucv_hs_callback_synack() - react on received SYN-ACK
+ **/
+static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ iucv->msglimit_peer = trans_hdr->window;
+ sk->sk_state = IUCV_CONNECTED;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synfin() - react on received SYN_FIN
+ **/
+static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_fin() - react on received FIN
+ **/
+static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ /* other end of connection closed */
+ if (!iucv)
+ goto out;
+ bh_lock_sock(sk);
+ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_win() - react on received WIN
+ **/
+static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ return NET_RX_SUCCESS;
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return NET_RX_SUCCESS;
+
+ atomic_sub(trans_hdr->window, &iucv->msg_sent);
+ iucv_sock_wake_msglim(sk);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_rx() - react on received data
+ **/
+static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ if (sk->sk_state != IUCV_CONNECTED) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ /* write stuff from iucv_msg to skb cb */
+ if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+ skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ IUCV_SKB_CB(skb)->offset = 0;
+ spin_lock(&iucv->message_q.lock);
+ if (skb_queue_empty(&iucv->backlog_skb_q)) {
+ if (sock_queue_rcv_skb(sk, skb)) {
+ /* handle rcv queue full */
+ skb_queue_tail(&iucv->backlog_skb_q, skb);
+ }
+ } else
+ skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+ spin_unlock(&iucv->message_q.lock);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_rcv() - base function for arriving data through HiperSockets
+ * transport
+ * called from netif RX softirq
+ **/
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct sock *sk;
+ struct iucv_sock *iucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ char nullstring[8];
+ int err = 0;
+
+ skb_pull(skb, ETH_HLEN);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ memset(nullstring, 0, sizeof(nullstring));
+ iucv = NULL;
+ sk = NULL;
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, &iucv_sk_list.head) {
+ if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ nullstring, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ } else {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name,
+ trans_hdr->srcAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ trans_hdr->srcUserID, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ }
+ }
+ read_unlock(&iucv_sk_list.lock);
+ if (!iucv)
+ sk = NULL;
+
+ /* no sock
+    how should we respond when there is no sock?
+    1) send without a sock, with no send rc checking?
+    2) introduce a default sock to handle these cases
+
+    SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
+    data -> send FIN
+    SYN|ACK, SYN|FIN, FIN -> no action? */
+
+ switch (trans_hdr->flags) {
+ case AF_IUCV_FLAG_SYN:
+ /* connect request */
+ err = afiucv_hs_callback_syn(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
+ /* connect request confirmed */
+ err = afiucv_hs_callback_synack(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
+ /* connect request refused */
+ err = afiucv_hs_callback_synfin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_FIN):
+ /* close request */
+ err = afiucv_hs_callback_fin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_WIN):
+ err = afiucv_hs_callback_win(sk, skb);
+ if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ break;
+ }
+ /* fall through and receive non-zero length data */
+ case (AF_IUCV_FLAG_SHT):
+ /* shutdown request */
+ /* fall through and receive zero length data */
+ case 0:
+ /* plain data frame */
+ IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
+ err = afiucv_hs_callback_rx(sk, skb);
+ break;
+ default:
+ ;
+ }
+
+ return err;
+}
+
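
The demultiplexer above applies two matching rules before dispatching on the flag byte: a SYN may land on any listening socket bound to the frame's destination name and user id whose peer fields are still empty, while every other frame needs the full four-field match. The same logic as a standalone sketch:

#include <stdbool.h>
#include <string.h>

struct id4 { char src_name[8], src_user[8], dst_name[8], dst_user[8]; };

static bool frame_matches(const struct id4 *sk, const struct id4 *hdr,
			  bool is_syn)
{
	static const char nullstring[8];	/* zero-filled */

	if (memcmp(sk->src_name, hdr->dst_name, 8) ||
	    memcmp(sk->src_user, hdr->dst_user, 8))
		return false;
	if (is_syn)	/* listener: no peer recorded yet */
		return !memcmp(sk->dst_name, nullstring, 8) &&
		       !memcmp(sk->dst_user, nullstring, 8);
	return !memcmp(sk->dst_name, hdr->src_name, 8) &&
	       !memcmp(sk->dst_user, hdr->src_user, 8);
}
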
+/**
+ * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
+ * transport
+ **/
+static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
+ enum iucv_tx_notify n)
+{
+ struct sock *isk = skb->sk;
+ struct sock *sk = NULL;
+ struct iucv_sock *iucv = NULL;
+ struct sk_buff_head *list;
+ struct sk_buff *list_skb;
+ struct sk_buff *nskb;
+ unsigned long flags;
+
+ read_lock_irqsave(&iucv_sk_list.lock, flags);
+ sk_for_each(sk, &iucv_sk_list.head)
+ if (sk == isk) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+
+ if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+ return;
+
+ list = &iucv->send_skb_q;
+ spin_lock_irqsave(&list->lock, flags);
+ if (skb_queue_empty(list))
+ goto out_unlock;
+ list_skb = list->next;
+ nskb = list_skb->next;
+ while (list_skb != (struct sk_buff *)list) {
+ if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
+ switch (n) {
+ case TX_NOTIFY_OK:
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
+ iucv_sock_wake_msglim(sk);
+ break;
+ case TX_NOTIFY_PENDING:
+ atomic_inc(&iucv->pendings);
+ break;
+ case TX_NOTIFY_DELAYED_OK:
+ __skb_unlink(list_skb, list);
+ atomic_dec(&iucv->pendings);
+ if (atomic_read(&iucv->pendings) <= 0)
+ iucv_sock_wake_msglim(sk);
+ kfree_skb(list_skb);
+ break;
+ case TX_NOTIFY_UNREACHABLE:
+ case TX_NOTIFY_DELAYED_UNREACHABLE:
+ case TX_NOTIFY_TPQFULL: /* not yet used */
+ case TX_NOTIFY_GENERALERROR:
+ case TX_NOTIFY_DELAYED_GENERALERROR:
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
+ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ break;
+ }
+ break;
+ }
+ list_skb = nskb;
+ nskb = nskb->next;
+ }
+out_unlock:
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ if (sk->sk_state == IUCV_CLOSING) {
+ if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+ sk->sk_state = IUCV_CLOSED;
+ sk->sk_state_change(sk);
+ }
+ }
+
+}
+
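
afiucv_hs_callback_txnotify() must find, on the send queue, the clone that afiucv_hs_send() queued for the skb the device just completed. It compares skb_shinfo() pointers: a clone made with skb_clone() shares its skb_shared_info with the original, so pointer equality identifies the pair. A minimal model of the trick:

#include <assert.h>
#include <stdlib.h>

struct shinfo { int nr_frags; };	/* stands in for skb_shared_info */
struct pkt { struct shinfo *shinfo; };	/* clones share one shinfo */

static struct pkt *clone_pkt(const struct pkt *p)
{
	struct pkt *c = malloc(sizeof(*c));

	assert(c);
	c->shinfo = p->shinfo;		/* shared, as with skb_clone() */
	return c;
}

int main(void)
{
	struct shinfo si = { 0 };
	struct pkt orig = { &si };
	struct pkt *clone = clone_pkt(&orig);

	/* txnotify's test: same shinfo => this queued clone belongs
	 * to the completed frame */
	assert(clone->shinfo == orig.shinfo);
	free(clone);
	return 0;
}
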
+/*
+ * afiucv_netdev_event: handle netdev notifier chain events
+ */
+static int afiucv_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct sock *sk;
+ struct iucv_sock *iucv;
+
+ switch (event) {
+ case NETDEV_REBOOT:
+ case NETDEV_GOING_DOWN:
+ sk_for_each(sk, &iucv_sk_list.head) {
+ iucv = iucv_sk(sk);
+ if ((iucv->hs_dev == event_dev) &&
+ (sk->sk_state == IUCV_CONNECTED)) {
+ if (event == NETDEV_GOING_DOWN)
+ iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block afiucv_netdev_notifier = {
+ .notifier_call = afiucv_netdev_event,
+};
+
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -1723,71 +2354,106 @@ static const struct net_proto_family iucv_sock_family_ops = {
.create = iucv_sock_create,
};
-static int __init afiucv_init(void)
+static struct packet_type iucv_packet_type = {
+ .type = cpu_to_be16(ETH_P_AF_IUCV),
+ .func = afiucv_hs_rcv,
+};
+
+static int afiucv_iucv_init(void)
{
int err;
- if (!MACHINE_IS_VM) {
- pr_err("The af_iucv module cannot be loaded"
- " without z/VM\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
- cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
- if (unlikely(err)) {
- WARN_ON(err);
- err = -EPROTONOSUPPORT;
- goto out;
- }
-
- err = iucv_register(&af_iucv_handler, 0);
+ err = pr_iucv->iucv_register(&af_iucv_handler, 0);
if (err)
goto out;
- err = proto_register(&iucv_proto, 0);
- if (err)
- goto out_iucv;
- err = sock_register(&iucv_sock_family_ops);
- if (err)
- goto out_proto;
/* establish dummy device */
+ af_iucv_driver.bus = pr_iucv->bus;
err = driver_register(&af_iucv_driver);
if (err)
- goto out_sock;
+ goto out_iucv;
af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!af_iucv_dev) {
err = -ENOMEM;
goto out_driver;
}
dev_set_name(af_iucv_dev, "af_iucv");
- af_iucv_dev->bus = &iucv_bus;
- af_iucv_dev->parent = iucv_root;
+ af_iucv_dev->bus = pr_iucv->bus;
+ af_iucv_dev->parent = pr_iucv->root;
af_iucv_dev->release = (void (*)(struct device *))kfree;
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
goto out_driver;
-
return 0;
out_driver:
driver_unregister(&af_iucv_driver);
+out_iucv:
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+out:
+ return err;
+}
+
+static int __init afiucv_init(void)
+{
+ int err;
+
+ if (MACHINE_IS_VM) {
+ cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
+ if (unlikely(err)) {
+ WARN_ON(err);
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
+ if (!pr_iucv) {
+ printk(KERN_WARNING "iucv_if lookup failed\n");
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ }
+ } else {
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ pr_iucv = NULL;
+ }
+
+ err = proto_register(&iucv_proto, 0);
+ if (err)
+ goto out;
+ err = sock_register(&iucv_sock_family_ops);
+ if (err)
+ goto out_proto;
+
+ if (pr_iucv) {
+ err = afiucv_iucv_init();
+ if (err)
+ goto out_sock;
+ } else
+ register_netdevice_notifier(&afiucv_netdev_notifier);
+ dev_add_pack(&iucv_packet_type);
+ return 0;
+
out_sock:
sock_unregister(PF_IUCV);
out_proto:
proto_unregister(&iucv_proto);
-out_iucv:
- iucv_unregister(&af_iucv_handler, 0);
out:
+ if (pr_iucv)
+ symbol_put(iucv_if);
return err;
}
static void __exit afiucv_exit(void)
{
- device_unregister(af_iucv_dev);
- driver_unregister(&af_iucv_driver);
+ if (pr_iucv) {
+ device_unregister(af_iucv_dev);
+ driver_unregister(&af_iucv_driver);
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+ symbol_put(iucv_if);
+ } else
+ unregister_netdevice_notifier(&afiucv_netdev_notifier);
+ dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
- iucv_unregister(&af_iucv_handler, 0);
}
module_init(afiucv_init);
@@ -1798,3 +2464,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
+
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f7db676de77..da787930df0 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -36,6 +36,7 @@
#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
@@ -50,10 +51,10 @@
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/smp.h>
/*
@@ -127,8 +128,8 @@ struct iucv_irq_list {
};
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
-static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
-static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
+static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
+static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };
/*
* Queue of interrupt buffers lock for delivery via the tasklet
@@ -405,7 +406,7 @@ static void iucv_allow_cpu(void *data)
parm->set_mask.ipmask = 0xf8;
iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
/* Set indication that iucv interrupts are allowed for this cpu. */
- cpu_set(cpu, iucv_irq_cpumask);
+ cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}
/**
@@ -425,7 +426,7 @@ static void iucv_block_cpu(void *data)
iucv_call_b2f0(IUCV_SETMASK, parm);
/* Clear indication that iucv interrupts are allowed for this cpu. */
- cpu_clear(cpu, iucv_irq_cpumask);
+ cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}
/**
@@ -450,7 +451,7 @@ static void iucv_block_cpu_almost(void *data)
iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
/* Clear indication that iucv interrupts are allowed for this cpu. */
- cpu_clear(cpu, iucv_irq_cpumask);
+ cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}
/**
@@ -465,7 +466,7 @@ static void iucv_declare_cpu(void *data)
union iucv_param *parm;
int rc;
- if (cpu_isset(cpu, iucv_buffer_cpumask))
+ if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
return;
/* Declare interrupt buffer. */
@@ -498,9 +499,9 @@ static void iucv_declare_cpu(void *data)
}
/* Set indication that an iucv buffer exists for this cpu. */
- cpu_set(cpu, iucv_buffer_cpumask);
+ cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
- if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
+ if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
/* Enable iucv interrupts on this cpu. */
iucv_allow_cpu(NULL);
else
@@ -519,7 +520,7 @@ static void iucv_retrieve_cpu(void *data)
int cpu = smp_processor_id();
union iucv_param *parm;
- if (!cpu_isset(cpu, iucv_buffer_cpumask))
+ if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
return;
/* Block iucv interrupts. */
@@ -530,7 +531,7 @@ static void iucv_retrieve_cpu(void *data)
iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
/* Clear indication that an iucv buffer exists for this cpu. */
- cpu_clear(cpu, iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}
/**
@@ -545,8 +546,8 @@ static void iucv_setmask_mp(void)
get_online_cpus();
for_each_online_cpu(cpu)
/* Enable all cpus with a declared buffer. */
- if (cpu_isset(cpu, iucv_buffer_cpumask) &&
- !cpu_isset(cpu, iucv_irq_cpumask))
+ if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
+ !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
smp_call_function_single(cpu, iucv_allow_cpu,
NULL, 1);
put_online_cpus();
@@ -563,9 +564,9 @@ static void iucv_setmask_up(void)
int cpu;
/* Disable all cpu but the first in cpu_irq_cpumask. */
- cpumask = iucv_irq_cpumask;
- cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
- for_each_cpu_mask_nr(cpu, cpumask)
+ cpumask_copy(&cpumask, &iucv_irq_cpumask);
+ cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
+ for_each_cpu(cpu, &cpumask)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
@@ -592,7 +593,7 @@ static int iucv_enable(void)
rc = -EIO;
for_each_online_cpu(cpu)
smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
- if (cpus_empty(iucv_buffer_cpumask))
+ if (cpumask_empty(&iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
goto out;
put_online_cpus();
@@ -620,7 +621,43 @@ static void iucv_disable(void)
put_online_cpus();
}
-static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+static void free_iucv_data(int cpu)
+{
+ kfree(iucv_param_irq[cpu]);
+ iucv_param_irq[cpu] = NULL;
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
+}
+
+static int alloc_iucv_data(int cpu)
+{
+ /* Note: GFP_DMA used to get memory below 2G */
+ iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_irq_data[cpu])
+ goto out_free;
+
+ /* Allocate parameter blocks. */
+ iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param[cpu])
+ goto out_free;
+
+ iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param_irq[cpu])
+ goto out_free;
+
+ return 0;
+
+out_free:
+ free_iucv_data(cpu);
+ return -ENOMEM;
+}
+
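
The helpers above replace three hand-unwound failure paths in the CPU notifier below with a single idempotent cleanup routine; because kfree(NULL) is a no-op, free_iucv_data() is safe after any partial allocation. The same shape in plain C, with illustrative sizes standing in for struct iucv_irq_data and union iucv_param:

#include <stdlib.h>

struct percpu { void *irq_data, *param, *param_irq; };

static void free_all(struct percpu *p)
{
	free(p->param_irq); p->param_irq = NULL;
	free(p->param);     p->param = NULL;
	free(p->irq_data);  p->irq_data = NULL;
}

static int alloc_all(struct percpu *p)
{
	p->irq_data  = malloc(64);	/* illustrative sizes */
	p->param     = malloc(448);
	p->param_irq = malloc(448);
	if (!p->irq_data || !p->param || !p->param_irq) {
		free_all(p);		/* safe: free(NULL) is a no-op */
		return -1;
	}
	return 0;
}
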
+static int iucv_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
cpumask_t cpumask;
@@ -629,38 +666,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_irq_data[cpu])
- return notifier_from_errno(-ENOMEM);
-
- iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param[cpu]) {
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- return notifier_from_errno(-ENOMEM);
- }
- iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param_irq[cpu]) {
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
+ if (alloc_iucv_data(cpu))
return notifier_from_errno(-ENOMEM);
- }
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
+ free_iucv_data(cpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -674,15 +687,16 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
case CPU_DOWN_PREPARE_FROZEN:
if (!iucv_path_table)
break;
- cpumask = iucv_buffer_cpumask;
- cpu_clear(cpu, cpumask);
- if (cpus_empty(cpumask))
+ cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, &cpumask);
+ if (cpumask_empty(&cpumask))
/* Can't offline last IUCV enabled cpu. */
return notifier_from_errno(-EINVAL);
smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
- if (cpus_empty(iucv_irq_cpumask))
- smp_call_function_single(first_cpu(iucv_buffer_cpumask),
- iucv_allow_cpu, NULL, 1);
+ if (cpumask_empty(&iucv_irq_cpumask))
+ smp_call_function_single(
+ cpumask_first(&iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 1);
break;
}
return NOTIFY_OK;
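
The notifier above reports allocation failure via notifier_from_errno(); the hotplug core recovers the errno on its side with notifier_to_errno(). A minimal sketch of that round trip (demo() is illustrative):

#include <linux/notifier.h>

static int demo(void)
{
	int ret = notifier_from_errno(-ENOMEM); /* encoded for the chain */

	return notifier_to_errno(ret);          /* yields -ENOMEM again */
}
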
@@ -734,7 +748,7 @@ static void iucv_cleanup_queue(void)
struct iucv_irq_list *p, *n;
/*
- * When a path is severed, the pathid can be reused immediatly
+ * When a path is severed, the pathid can be reused immediately
 * on an iucv connect or a connection pending interrupt. Remove
* all entries from the task queue that refer to a stale pathid
* (iucv_path_table[ix] == NULL). Only then do the iucv connect
@@ -806,7 +820,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
spin_lock_bh(&iucv_table_lock);
/* Remove handler from the iucv_handler_list. */
list_del_init(&handler->list);
- /* Sever all pathids still refering to the handler. */
+ /* Sever all pathids still referring to the handler. */
list_for_each_entry_safe(p, n, &handler->paths, list) {
iucv_sever_pathid(p->pathid, NULL);
iucv_path_table[p->pathid] = NULL;
@@ -827,14 +841,17 @@ EXPORT_SYMBOL(iucv_unregister);
static int iucv_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- int i, rc;
+ int i;
+
+ if (cpumask_empty(&iucv_irq_cpumask))
+ return NOTIFY_DONE;
get_online_cpus();
- on_each_cpu(iucv_block_cpu, NULL, 1);
+ on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
preempt_disable();
for (i = 0; i < iucv_max_pathid; i++) {
if (iucv_path_table[i])
- rc = iucv_sever_pathid(i, NULL);
+ iucv_sever_pathid(i, NULL);
}
preempt_enable();
put_online_cpus();
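
on_each_cpu_mask() narrows the reboot-time IPI from every online CPU to just those in iucv_irq_cpumask, which is also why the new empty-mask early exit above is safe. A hedged sketch of the call pattern (block_fn() stands in for iucv_block_cpu()):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void block_fn(void *unused)
{
	/* per-CPU work; runs in IPI context on each targeted CPU */
}

static void block_cpus_in(const struct cpumask *mask)
{
	if (cpumask_empty(mask))
		return;			/* nothing to signal */
	get_online_cpus();		/* keep the mask stable */
	on_each_cpu_mask(mask, block_fn, NULL, 1); /* 1 == wait */
	put_online_cpus();
}
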
@@ -865,7 +882,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -914,7 +931,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
spin_lock_bh(&iucv_table_lock);
iucv_cleanup_queue();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -974,7 +991,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1006,7 +1023,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1035,7 +1052,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
int rc;
preempt_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1069,7 +1086,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1161,7 +1178,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1234,7 +1251,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1273,7 +1290,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1323,7 +1340,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
union iucv_param *parm;
int rc;
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1410,7 +1427,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (cpus_empty(iucv_buffer_cpumask)) {
+ if (cpumask_empty(&iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1798,12 +1815,13 @@ static void iucv_work_fn(struct work_struct *work)
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
*/
-static void iucv_external_interrupt(unsigned int ext_int_code,
+static void iucv_external_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct iucv_irq_data *p;
struct iucv_irq_list *work;
+ inc_irq_stat(IRQEXT_IUC);
p = iucv_irq_data[smp_processor_id()];
if (p->ippathid >= iucv_max_pathid) {
WARN_ON(p->ippathid >= iucv_max_pathid);
@@ -1886,7 +1904,7 @@ static int iucv_pm_freeze(struct device *dev)
printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
if (iucv_pm_state != IUCV_PM_FREEZING) {
- for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+ for_each_cpu(cpu, &iucv_irq_cpumask)
smp_call_function_single(cpu, iucv_block_cpu_almost,
NULL, 1);
cancel_work_sync(&iucv_work);
@@ -1926,7 +1944,7 @@ static int iucv_pm_thaw(struct device *dev)
if (rc)
goto out;
}
- if (cpus_empty(iucv_irq_cpumask)) {
+ if (cpumask_empty(&iucv_irq_cpumask)) {
if (iucv_nonsmp_handler)
/* enable interrupts on one cpu */
iucv_allow_cpu(NULL);
@@ -1959,7 +1977,7 @@ static int iucv_pm_restore(struct device *dev)
pr_warning("Suspending Linux did not completely close all IUCV "
"connections\n");
iucv_pm_state = IUCV_PM_RESTORING;
- if (cpus_empty(iucv_irq_cpumask)) {
+ if (cpumask_empty(&iucv_irq_cpumask)) {
rc = iucv_query_maxconn();
rc = iucv_enable();
if (rc)
@@ -1971,6 +1989,27 @@ out:
return rc;
}
+struct iucv_interface iucv_if = {
+ .message_receive = iucv_message_receive,
+ .__message_receive = __iucv_message_receive,
+ .message_reply = iucv_message_reply,
+ .message_reject = iucv_message_reject,
+ .message_send = iucv_message_send,
+ .__message_send = __iucv_message_send,
+ .message_send2way = iucv_message_send2way,
+ .message_purge = iucv_message_purge,
+ .path_accept = iucv_path_accept,
+ .path_connect = iucv_path_connect,
+ .path_quiesce = iucv_path_quiesce,
+ .path_resume = iucv_path_resume,
+ .path_sever = iucv_path_sever,
+ .iucv_register = iucv_register,
+ .iucv_unregister = iucv_unregister,
+ .bus = NULL,
+ .root = NULL,
+};
+EXPORT_SYMBOL(iucv_if);
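
Exporting iucv_if lets AF_IUCV reach the IUCV primitives through an indirection rather than hard symbol dependencies, so the socket module can also be built when only the HiperSockets transport is available. A sketch of how a consumer could bind to the interface at runtime (bind_iucv(), unbind_iucv() and iucv_ops are illustrative names):

#include <linux/kmod.h>
#include <linux/module.h>
#include <net/iucv/iucv.h>

static struct iucv_interface *iucv_ops;	/* illustrative */

static int bind_iucv(void)
{
	/* Load iucv.ko on demand and take a reference on iucv_if. */
	iucv_ops = try_then_request_module(symbol_get(iucv_if), "iucv");
	if (!iucv_ops)
		return -EPROTONOSUPPORT;
	/* e.g. iucv_ops->path_connect(...), iucv_ops->message_send(...) */
	return 0;
}

static void unbind_iucv(void)
{
	if (iucv_ops)
		symbol_put(iucv_if);
}
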
+
/**
* iucv_init
*
@@ -1985,45 +2024,33 @@ static int __init iucv_init(void)
rc = -EPROTONOSUPPORT;
goto out;
}
+ ctl_set_bit(0, 1);
rc = iucv_query_maxconn();
if (rc)
- goto out;
- rc = register_external_interrupt(0x4000, iucv_external_interrupt);
+ goto out_ctl;
+ rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
if (rc)
- goto out;
+ goto out_ctl;
iucv_root = root_device_register("iucv");
if (IS_ERR(iucv_root)) {
rc = PTR_ERR(iucv_root);
goto out_int;
}
- for_each_online_cpu(cpu) {
- /* Note: GFP_DMA used to get memory below 2G */
- iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_irq_data[cpu]) {
- rc = -ENOMEM;
- goto out_free;
- }
+ cpu_notifier_register_begin();
- /* Allocate parameter blocks. */
- iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param[cpu]) {
- rc = -ENOMEM;
- goto out_free;
- }
- iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param_irq[cpu]) {
+ for_each_online_cpu(cpu) {
+ if (alloc_iucv_data(cpu)) {
rc = -ENOMEM;
goto out_free;
}
-
}
- rc = register_hotcpu_notifier(&iucv_cpu_notifier);
+ rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
if (rc)
goto out_free;
+
+ cpu_notifier_register_done();
+
rc = register_reboot_notifier(&iucv_reboot_notifier);
if (rc)
goto out_cpu;
@@ -2034,24 +2061,26 @@ static int __init iucv_init(void)
rc = bus_register(&iucv_bus);
if (rc)
goto out_reboot;
+ iucv_if.root = iucv_root;
+ iucv_if.bus = &iucv_bus;
return 0;
out_reboot:
unregister_reboot_notifier(&iucv_reboot_notifier);
out_cpu:
- unregister_hotcpu_notifier(&iucv_cpu_notifier);
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
- for_each_possible_cpu(cpu) {
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- }
+ for_each_possible_cpu(cpu)
+ free_iucv_data(cpu);
+
+ cpu_notifier_register_done();
+
root_device_unregister(iucv_root);
out_int:
- unregister_external_interrupt(0x4000, iucv_external_interrupt);
+ unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
+out_ctl:
+ ctl_clear_bit(0, 1);
out:
return rc;
}
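
The reworked init path brackets the online-CPU walk and notifier registration with cpu_notifier_register_begin()/cpu_notifier_register_done(), closing the window in which a CPU could come online after the allocation loop but before the notifier is installed. A condensed sketch of the pattern (example_init() is illustrative, with error handling abbreviated):

#include <linux/cpu.h>

static int example_init(struct notifier_block *nb)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();	/* block hotplug transitions */
	for_each_online_cpu(cpu) {
		rc = alloc_iucv_data(cpu);
		if (rc)
			goto out_free;
	}
	rc = __register_hotcpu_notifier(nb); /* lock already held */
	if (rc)
		goto out_free;
	cpu_notifier_register_done();
	return 0;
out_free:
	for_each_possible_cpu(cpu)
		free_iucv_data(cpu);
	cpu_notifier_register_done();
	return rc;
}
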
@@ -2073,18 +2102,14 @@ static void __exit iucv_exit(void)
kfree(p);
spin_unlock_irq(&iucv_queue_lock);
unregister_reboot_notifier(&iucv_reboot_notifier);
- unregister_hotcpu_notifier(&iucv_cpu_notifier);
- for_each_possible_cpu(cpu) {
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- }
+ cpu_notifier_register_begin();
+ __unregister_hotcpu_notifier(&iucv_cpu_notifier);
+ for_each_possible_cpu(cpu)
+ free_iucv_data(cpu);
+ cpu_notifier_register_done();
root_device_unregister(iucv_root);
bus_unregister(&iucv_bus);
- unregister_external_interrupt(0x4000, iucv_external_interrupt);
+ unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}
subsys_initcall(iucv_init);