author    John Jolly <jjolly@suse.de>    2012-02-03 23:44:36 +0100
committer Michal Marek <mmarek@suse.cz>  2012-02-13 16:57:28 +0100
commit    ff7cec54fbb60e6946ef03e46529f4528e94c3cf
tree      fd6325b62c8fbab9f23fd8c84c270be2334f70f5
parent    fc03fdabc94d2c61cee5be6b51eef8f0fcac1d99
- patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch: af_iucv:
  remove IUCV-pathes completely (bnc#744795,LTC#78292).
- patches.arch/s390-45-02-af_iucv-net-device.patch: af_iucv:
  performance improvements for new HS transport (bnc#744795,LTC#78346).
- patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch: af_iucv:
  offer new getsockopt SO_MSGSIZE (bnc#744795,LTC#78348).
-rw-r--r--   kernel-source.changes                                       11
-rw-r--r--   patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch     284
-rw-r--r--   patches.arch/s390-45-02-af_iucv-net-device.patch           438
-rw-r--r--   patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch    57
-rw-r--r--   series.conf                                                  4
5 files changed, 794 insertions, 0 deletions
diff --git a/kernel-source.changes b/kernel-source.changes
index 8eddf2d112..d7ea4da0fb 100644
--- a/kernel-source.changes
+++ b/kernel-source.changes
@@ -23,6 +23,17 @@ Tue Feb 7 20:36:09 CET 2012 - eich@suse.de
drm/i915/sdvo: Fix up EDID matching logic (bnc #744315).
-------------------------------------------------------------------
+Fri Feb 3 23:44:17 CET 2012 - jjolly@suse.de
+
+- patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch: af_iucv:
+ remove IUCV-pathes completely (bnc#744795,LTC#78292).
+- patches.arch/s390-45-02-af_iucv-net-device.patch:
+ af_iucv: performance improvements for new HS transport
+ (bnc#744795,LTC#78346).
+- patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch:
+ af_iucv: offer new getsockopt SO_MSGSIZE (bnc#744795,LTC#78348).
+
+-------------------------------------------------------------------
Fri Feb 3 16:37:52 CET 2012 - jeffm@suse.com
- patches.suse/btrfs-handle-EIO: Fix typo.
diff --git a/patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch b/patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch
new file mode 100644
index 0000000000..4282b3e466
--- /dev/null
+++ b/patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch
@@ -0,0 +1,284 @@
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: af_iucv: remove IUCV-pathes completely
+Patch-mainline: Yes
+References: bnc#744795,LTC#78292
+
+Symptom: defined, but unused IUCV-pathes
+Problem: When running AF_IUCV socket programs with IUCV transport,
+ a SEVER is missing in the callback of a receiving SEVERED. This
+ may inhibit z/VM to remove the corresponding IUCV-path completely.
+Solution: Add a SEVER in the SEVERED callback. In addition, simplify the
+ af_iucv code merging states IUCV_SEVERED and IUCV_DISCONN.
+
+Acked-by: John Jolly <jjolly@suse.de>
+
+---
+ include/net/iucv/af_iucv.h | 1
+ net/iucv/af_iucv.c | 100 ++++++++++++++++++---------------------------
+ 2 files changed, 42 insertions(+), 59 deletions(-)
+
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -27,7 +27,6 @@ enum {
+ IUCV_OPEN,
+ IUCV_BOUND,
+ IUCV_LISTEN,
+- IUCV_SEVERED,
+ IUCV_DISCONN,
+ IUCV_CLOSING,
+ IUCV_CLOSED
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -90,6 +90,7 @@ do { \
+
+ static void iucv_sock_kill(struct sock *sk);
+ static void iucv_sock_close(struct sock *sk);
++static void iucv_sever_path(struct sock *, int);
+
+ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+@@ -164,18 +165,11 @@ static int afiucv_pm_freeze(struct devic
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, node, &iucv_sk_list.head) {
+ iucv = iucv_sk(sk);
+- skb_queue_purge(&iucv->send_skb_q);
+- skb_queue_purge(&iucv->backlog_skb_q);
+ switch (sk->sk_state) {
+- case IUCV_SEVERED:
+ case IUCV_DISCONN:
+ case IUCV_CLOSING:
+ case IUCV_CONNECTED:
+- if (iucv->path) {
+- err = pr_iucv->path_sever(iucv->path, NULL);
+- iucv_path_free(iucv->path);
+- iucv->path = NULL;
+- }
++ iucv_sever_path(sk, 0);
+ break;
+ case IUCV_OPEN:
+ case IUCV_BOUND:
+@@ -184,6 +178,8 @@ static int afiucv_pm_freeze(struct devic
+ default:
+ break;
+ }
++ skb_queue_purge(&iucv->send_skb_q);
++ skb_queue_purge(&iucv->backlog_skb_q);
+ }
+ read_unlock(&iucv_sk_list.lock);
+ return err;
+@@ -212,7 +208,6 @@ static int afiucv_pm_restore_thaw(struct
+ sk->sk_state_change(sk);
+ break;
+ case IUCV_DISCONN:
+- case IUCV_SEVERED:
+ case IUCV_CLOSING:
+ case IUCV_LISTEN:
+ case IUCV_BOUND:
+@@ -458,10 +453,29 @@ static void iucv_sock_kill(struct sock *
+ sock_put(sk);
+ }
+
++/* Terminate an IUCV path */
++static void iucv_sever_path(struct sock *sk, int with_user_data)
++{
++ unsigned char user_data[16];
++ struct iucv_sock *iucv = iucv_sk(sk);
++ struct iucv_path *path = iucv->path;
++
++ if (iucv->path) {
++ iucv->path = NULL;
++ if (with_user_data) {
++ low_nmcpy(user_data, iucv->src_name);
++ high_nmcpy(user_data, iucv->dst_name);
++ ASCEBC(user_data, sizeof(user_data));
++ pr_iucv->path_sever(path, user_data);
++ } else
++ pr_iucv->path_sever(path, NULL);
++ iucv_path_free(path);
++ }
++}
++
+ /* Close an IUCV socket */
+ static void iucv_sock_close(struct sock *sk)
+ {
+- unsigned char user_data[16];
+ struct iucv_sock *iucv = iucv_sk(sk);
+ unsigned long timeo;
+ int err, blen;
+@@ -508,25 +522,14 @@ static void iucv_sock_close(struct sock
+ sk->sk_state = IUCV_CLOSED;
+ sk->sk_state_change(sk);
+
+- if (iucv->path) {
+- low_nmcpy(user_data, iucv->src_name);
+- high_nmcpy(user_data, iucv->dst_name);
+- ASCEBC(user_data, sizeof(user_data));
+- pr_iucv->path_sever(iucv->path, user_data);
+- iucv_path_free(iucv->path);
+- iucv->path = NULL;
+- }
+-
+ sk->sk_err = ECONNRESET;
+ sk->sk_state_change(sk);
+
+ skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->backlog_skb_q);
+- break;
+
+- default:
+- /* nothing to do here */
+- break;
++ default: /* fall through */
++ iucv_sever_path(sk, 1);
+ }
+
+ /* mark socket for deletion by iucv_sock_kill() */
+@@ -675,16 +678,12 @@ struct sock *iucv_accept_dequeue(struct
+ }
+
+ if (sk->sk_state == IUCV_CONNECTED ||
+- sk->sk_state == IUCV_SEVERED ||
+- sk->sk_state == IUCV_DISCONN || /* due to PM restore */
++ sk->sk_state == IUCV_DISCONN ||
+ !newsock) {
+ iucv_accept_unlink(sk);
+ if (newsock)
+ sock_graft(sk, newsock);
+
+- if (sk->sk_state == IUCV_SEVERED)
+- sk->sk_state = IUCV_DISCONN;
+-
+ release_sock(sk);
+ return sk;
+ }
+@@ -916,11 +915,8 @@ static int iucv_sock_connect(struct sock
+ if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
+ err = -ECONNREFUSED;
+
+- if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
+- pr_iucv->path_sever(iucv->path, NULL);
+- iucv_path_free(iucv->path);
+- iucv->path = NULL;
+- }
++ if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
++ iucv_sever_path(sk, 0);
+
+ done:
+ release_sock(sk);
+@@ -1356,7 +1352,7 @@ static int iucv_sock_recvmsg(struct kioc
+ int blen;
+ int err = 0;
+
+- if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
++ if ((sk->sk_state == IUCV_DISCONN) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+ list_empty(&iucv->message_q.list))
+@@ -1504,7 +1500,7 @@ unsigned int iucv_sock_poll(struct file
+ if (sk->sk_state == IUCV_CLOSED)
+ mask |= POLLHUP;
+
+- if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
++ if (sk->sk_state == IUCV_DISCONN)
+ mask |= POLLIN;
+
+ if (sock_writeable(sk))
+@@ -1531,7 +1527,6 @@ static int iucv_sock_shutdown(struct soc
+ switch (sk->sk_state) {
+ case IUCV_DISCONN:
+ case IUCV_CLOSING:
+- case IUCV_SEVERED:
+ case IUCV_CLOSED:
+ err = -ENOTCONN;
+ goto fail;
+@@ -1587,13 +1582,6 @@ static int iucv_sock_release(struct sock
+
+ iucv_sock_close(sk);
+
+- /* Unregister with IUCV base support */
+- if (iucv_sk(sk)->path) {
+- pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
+- iucv_path_free(iucv_sk(sk)->path);
+- iucv_sk(sk)->path = NULL;
+- }
+-
+ sock_orphan(sk);
+ iucv_sock_kill(sk);
+ return err;
+@@ -1773,8 +1761,7 @@ static int iucv_callback_connreq(struct
+ path->msglim = iucv->msglimit;
+ err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ if (err) {
+- err = pr_iucv->path_sever(path, user_data);
+- iucv_path_free(path);
++ iucv_sever_path(sk, 1);
+ iucv_sock_kill(nsk);
+ goto fail;
+ }
+@@ -1851,6 +1838,7 @@ static void iucv_callback_txdone(struct
+ struct sk_buff *list_skb = list->next;
+ unsigned long flags;
+
++ bh_lock_sock(sk);
+ if (!skb_queue_empty(list)) {
+ spin_lock_irqsave(&list->lock, flags);
+
+@@ -1872,7 +1860,6 @@ static void iucv_callback_txdone(struct
+ iucv_sock_wake_msglim(sk);
+ }
+ }
+- BUG_ON(!this);
+
+ if (sk->sk_state == IUCV_CLOSING) {
+ if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+@@ -1880,6 +1867,7 @@ static void iucv_callback_txdone(struct
+ sk->sk_state_change(sk);
+ }
+ }
++ bh_unlock_sock(sk);
+
+ }
+
+@@ -1887,12 +1875,14 @@ static void iucv_callback_connrej(struct
+ {
+ struct sock *sk = path->private;
+
+- if (!list_empty(&iucv_sk(sk)->accept_q))
+- sk->sk_state = IUCV_SEVERED;
+- else
+- sk->sk_state = IUCV_DISCONN;
++ if (sk->sk_state == IUCV_CLOSED)
++ return;
+
++ bh_lock_sock(sk);
++ iucv_sever_path(sk, 1);
++ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
++ bh_unlock_sock(sk);
+ }
+
+ /* called if the other communication side shuts down its RECV direction;
+@@ -2056,10 +2046,7 @@ static int afiucv_hs_callback_fin(struct
+ /* other end of connection closed */
+ if (iucv) {
+ bh_lock_sock(sk);
+- if (!list_empty(&iucv->accept_q))
+- sk->sk_state = IUCV_SEVERED;
+- else
+- sk->sk_state = IUCV_DISCONN;
++ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+ }
+@@ -2288,10 +2275,7 @@ static void afiucv_hs_callback_txnotify(
+ case TX_NOTIFY_DELAYED_GENERALERROR:
+ __skb_unlink(this, list);
+ kfree_skb(this);
+- if (!list_empty(&iucv->accept_q))
+- sk->sk_state = IUCV_SEVERED;
+- else
+- sk->sk_state = IUCV_DISCONN;
++ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ break;
+ }
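For context, the sketch below shows how a userspace AF_IUCV client would exercise the paths consolidated above: connect() drives iucv_sock_connect() and close() ends up in iucv_sock_close(), both of which now sever the IUCV path through the new iucv_sever_path() helper. This is an illustration only, not part of the patch: struct sockaddr_iucv is mirrored from the kernel's include/net/iucv/af_iucv.h, AF_IUCV may need a local definition on older toolchains, and the peer guest id "LNXPEER" and application name "TESTSRV" are placeholders.

/* Illustration only: minimal AF_IUCV client whose connect()/close()
 * exercise the consolidated iucv_sever_path() logic. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32                      /* from linux/socket.h */
#endif

/* Mirrored from struct sockaddr_iucv in include/net/iucv/af_iucv.h */
struct sockaddr_iucv {
	sa_family_t    siucv_family;
	unsigned short siucv_port;          /* reserved */
	unsigned int   siucv_addr;          /* reserved */
	char           siucv_nodeid[8];     /* reserved */
	char           siucv_user_id[8];    /* z/VM guest user id, blank padded */
	char           siucv_name[8];       /* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv peer;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&peer, 0, sizeof(peer));
	peer.siucv_family = AF_IUCV;
	memset(peer.siucv_user_id, ' ', 8);
	memset(peer.siucv_name, ' ', 8);
	memcpy(peer.siucv_user_id, "LNXPEER", 7);   /* placeholder guest id */
	memcpy(peer.siucv_name, "TESTSRV", 7);      /* placeholder app name */

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");

	/* close() reaches iucv_sock_close(), which severs the IUCV path
	 * via iucv_sever_path() instead of open-coding path_sever(). */
	close(fd);
	return 0;
}
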
diff --git a/patches.arch/s390-45-02-af_iucv-net-device.patch b/patches.arch/s390-45-02-af_iucv-net-device.patch
new file mode 100644
index 0000000000..5183631419
--- /dev/null
+++ b/patches.arch/s390-45-02-af_iucv-net-device.patch
@@ -0,0 +1,438 @@
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: af_iucv: performance improvements for new HS transport
+Patch-mainline: Yes
+References: bnc#744795,LTC#78346
+
+Symptom: close and send call taking long
+Problem: For the close call a socket state change is missing.
+ For the send call net_device handling is cumbersome.
+Solution: For the close call the state change to IUCV_CLOSED is added
+ in afiucv_hs_callback_txnotify together with further
+ locking improvements.
+ For the send call net_device handling is simplified and the
+ main part moved to bind to accelerate sending.
+ Poll blocks sending if unconfirmed message limit is reached.
+ In addition usage of ancillary data is enabled with
+ HiperSockets transport.
+
+Acked-by: John Jolly <jjolly@suse.de>
+
+---
+ include/net/iucv/af_iucv.h | 1
+ net/iucv/af_iucv.c | 160 ++++++++++++++++++++++++---------------------
+ 2 files changed, 88 insertions(+), 73 deletions(-)
+
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -113,6 +113,7 @@ struct iucv_sock {
+ spinlock_t accept_q_lock;
+ struct sock *parent;
+ struct iucv_path *path;
++ struct net_device *hs_dev;
+ struct sk_buff_head send_skb_q;
+ struct sk_buff_head backlog_skb_q;
+ struct sock_msg_q message_q;
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -41,7 +41,7 @@ static struct proto iucv_proto = {
+ .obj_size = sizeof(struct iucv_sock),
+ };
+
+-static struct iucv_interface *pr_iucv = NULL;
++static struct iucv_interface *pr_iucv;
+
+ /* special AF_IUCV IPRM messages */
+ static const u8 iprm_shutdown[8] =
+@@ -324,7 +324,6 @@ static void iucv_sock_wake_msglim(struct
+ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags)
+ {
+- struct net *net = sock_net(sock);
+ struct iucv_sock *iucv = iucv_sk(sock);
+ struct af_iucv_trans_hdr *phs_hdr;
+ struct sk_buff *nskb;
+@@ -361,12 +360,10 @@ static int afiucv_hs_send(struct iucv_me
+ if (imsg)
+ memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+- rcu_read_lock();
+- skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
+- rcu_read_unlock();
++ skb->dev = iucv->hs_dev;
+ if (!skb->dev)
+ return -ENODEV;
+- if (!(skb->dev->flags & IFF_UP))
++ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
+ return -ENETDOWN;
+ if (skb->len > skb->dev->mtu) {
+ if (sock->sk_type == SOCK_SEQPACKET)
+@@ -381,14 +378,14 @@ static int afiucv_hs_send(struct iucv_me
+ return -ENOMEM;
+ skb_queue_tail(&iucv->send_skb_q, nskb);
+ err = dev_queue_xmit(skb);
+- if (err) {
++ if (net_xmit_eval(err)) {
+ skb_unlink(nskb, &iucv->send_skb_q);
+ kfree_skb(nskb);
+ } else {
+ atomic_sub(confirm_recv, &iucv->msg_recv);
+ WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+ }
+- return err;
++ return net_xmit_eval(err);
+ }
+
+ /* Timers */
+@@ -478,7 +475,8 @@ static void iucv_sock_close(struct sock
+ {
+ struct iucv_sock *iucv = iucv_sk(sk);
+ unsigned long timeo;
+- int err, blen;
++ int err = 0;
++ int blen;
+ struct sk_buff *skb;
+
+ iucv_sock_clear_timer(sk);
+@@ -495,20 +493,18 @@ static void iucv_sock_close(struct sock
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+- skb_reserve(skb,
+- sizeof(struct af_iucv_trans_hdr) +
+- ETH_HLEN);
++ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb,
+ AF_IUCV_FLAG_FIN);
+ }
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+- case IUCV_DISCONN:
++ case IUCV_DISCONN: /* fall through */
+ sk->sk_state = IUCV_CLOSING;
+ sk->sk_state_change(sk);
+
+- if (!skb_queue_empty(&iucv->send_skb_q)) {
++ if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ timeo = sk->sk_lingertime;
+ else
+@@ -532,6 +528,12 @@ static void iucv_sock_close(struct sock
+ iucv_sever_path(sk, 1);
+ }
+
++ if (iucv->hs_dev) {
++ dev_put(iucv->hs_dev);
++ iucv->hs_dev = NULL;
++ sk->sk_bound_dev_if = 0;
++ }
++
+ /* mark socket for deletion by iucv_sock_kill() */
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+@@ -737,7 +739,9 @@ static int iucv_sock_bind(struct socket
+ if (!memcmp(dev->perm_addr, uid, 8)) {
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+- sock->sk->sk_bound_dev_if = dev->ifindex;
++ sk->sk_bound_dev_if = dev->ifindex;
++ iucv->hs_dev = dev;
++ dev_hold(dev);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_HIPER;
+ if (!iucv->msglimit)
+@@ -1142,8 +1146,10 @@ static int iucv_sock_sendmsg(struct kioc
+ noblock, &err);
+ else
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
+- if (!skb)
++ if (!skb) {
++ err = -ENOMEM;
+ goto out;
++ }
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+@@ -1166,6 +1172,7 @@ static int iucv_sock_sendmsg(struct kioc
+ /* increment and save iucv message tag for msg_completion cbk */
+ txmsg.tag = iucv->send_tag++;
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
++
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_sent);
+ err = afiucv_hs_send(&txmsg, sk, skb, 0);
+@@ -1412,7 +1419,14 @@ static int iucv_sock_recvmsg(struct kioc
+ }
+
+ kfree_skb(skb);
+- atomic_inc(&iucv->msg_recv);
++ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
++ atomic_inc(&iucv->msg_recv);
++ if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
++ WARN_ON(1);
++ iucv_sock_close(sk);
++ return -EFAULT;
++ }
++ }
+
+ /* Queue backlog skbs */
+ spin_lock_bh(&iucv->message_q.lock);
+@@ -1435,9 +1449,7 @@ static int iucv_sock_recvmsg(struct kioc
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ sskb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (sskb) {
+- skb_reserve(sskb,
+- sizeof(struct af_iucv_trans_hdr)
+- + ETH_HLEN);
++ skb_reserve(sskb, blen);
+ err = afiucv_hs_send(NULL, sk, sskb,
+ AF_IUCV_FLAG_WIN);
+ }
+@@ -1503,7 +1515,7 @@ unsigned int iucv_sock_poll(struct file
+ if (sk->sk_state == IUCV_DISCONN)
+ mask |= POLLIN;
+
+- if (sock_writeable(sk))
++ if (sock_writeable(sk) && iucv_below_msglim(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+@@ -1621,9 +1633,8 @@ static int iucv_sock_setsockopt(struct s
+ case IUCV_BOUND:
+ if (val < 1 || val > (u16)(~0))
+ rc = -EINVAL;
+- else {
++ else
+ iucv->msglimit = val;
+- }
+ break;
+ default:
+ rc = -EINVAL;
+@@ -1761,7 +1772,7 @@ static int iucv_callback_connreq(struct
+ path->msglim = iucv->msglimit;
+ err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ if (err) {
+- iucv_sever_path(sk, 1);
++ iucv_sever_path(nsk, 1);
+ iucv_sock_kill(nsk);
+ goto fail;
+ }
+@@ -1963,14 +1974,15 @@ static int afiucv_hs_callback_syn(struct
+ niucv->msglimit = iucv->msglimit;
+ if (!trans_hdr->window)
+ niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
+- else {
++ else
+ niucv->msglimit_peer = trans_hdr->window;
+- }
+ memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
+ memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
+ memcpy(niucv->src_name, iucv->src_name, 8);
+ memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+ nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
++ niucv->hs_dev = iucv->hs_dev;
++ dev_hold(niucv->hs_dev);
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
+ trans_hdr->window = niucv->msglimit;
+@@ -1980,9 +1992,8 @@ static int afiucv_hs_callback_syn(struct
+ iucv_accept_enqueue(sk, nsk);
+ nsk->sk_state = IUCV_CONNECTED;
+ sk->sk_data_ready(sk, 1);
+- } else {
++ } else
+ iucv_sock_kill(nsk);
+- }
+ bh_unlock_sock(sk);
+
+ out:
+@@ -1998,12 +2009,10 @@ static int afiucv_hs_callback_synack(str
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+- if (!iucv) {
++ if (!iucv)
+ goto out;
+- }
+- if (sk->sk_state != IUCV_BOUND) {
++ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+- }
+ bh_lock_sock(sk);
+ iucv->msglimit_peer = trans_hdr->window;
+ sk->sk_state = IUCV_CONNECTED;
+@@ -2021,12 +2030,10 @@ static int afiucv_hs_callback_synfin(str
+ {
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+- if (!iucv) {
++ if (!iucv)
+ goto out;
+- }
+- if (sk->sk_state != IUCV_BOUND) {
++ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+- }
+ bh_lock_sock(sk);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+@@ -2044,12 +2051,15 @@ static int afiucv_hs_callback_fin(struct
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ /* other end of connection closed */
+- if (iucv) {
+- bh_lock_sock(sk);
++ if (!iucv)
++ goto out;
++ bh_lock_sock(sk);
++ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+- bh_unlock_sock(sk);
+ }
++ bh_unlock_sock(sk);
++out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+@@ -2063,13 +2073,11 @@ static int afiucv_hs_callback_win(struct
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+- if (!iucv) {
++ if (!iucv)
+ return NET_RX_SUCCESS;
+- }
+
+- if (sk->sk_state != IUCV_CONNECTED) {
++ if (sk->sk_state != IUCV_CONNECTED)
+ return NET_RX_SUCCESS;
+- }
+
+ atomic_sub(trans_hdr->window, &iucv->msg_sent);
+ iucv_sock_wake_msglim(sk);
+@@ -2196,17 +2204,19 @@ static int afiucv_hs_rcv(struct sk_buff
+ break;
+ case (AF_IUCV_FLAG_WIN):
+ err = afiucv_hs_callback_win(sk, skb);
+- if (skb->len > sizeof(struct af_iucv_trans_hdr))
+- err = afiucv_hs_callback_rx(sk, skb);
+- else
+- kfree(skb);
+- break;
++ if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
++ kfree_skb(skb);
++ break;
++ }
++ /* fall through */
+ case 0:
+ /* plain data frame */
++ memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
++ CB_TRGCLS_LEN);
+ err = afiucv_hs_callback_rx(sk, skb);
+ break;
+ default:
+- break;
++ ;
+ }
+
+ return err;
+@@ -2224,70 +2234,74 @@ static void afiucv_hs_callback_txnotify(
+ struct iucv_sock *iucv = NULL;
+ struct sk_buff_head *list;
+ struct sk_buff *list_skb;
+- struct sk_buff *this = NULL;
++ struct sk_buff *nskb;
+ unsigned long flags;
+ struct hlist_node *node;
+
+-
+- read_lock(&iucv_sk_list.lock);
++ read_lock_irqsave(&iucv_sk_list.lock, flags);
+ sk_for_each(sk, node, &iucv_sk_list.head)
+ if (sk == isk) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+- read_unlock(&iucv_sk_list.lock);
++ read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+
+- if (!iucv) {
++ if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+ return;
+- }
+
+- bh_lock_sock(sk);
+ list = &iucv->send_skb_q;
+- list_skb = list->next;
+- if (skb_queue_empty(list)) {
+- goto out_unlock;
+- }
+-
+ spin_lock_irqsave(&list->lock, flags);
++ if (skb_queue_empty(list))
++ goto out_unlock;
++ list_skb = list->next;
++ nskb = list_skb->next;
+ while (list_skb != (struct sk_buff *)list) {
+ if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
+- this = list_skb;
+ switch (n) {
+ case TX_NOTIFY_OK:
+- __skb_unlink(this, list);
++ __skb_unlink(list_skb, list);
++ kfree_skb(list_skb);
+ iucv_sock_wake_msglim(sk);
+- kfree_skb(this);
+ break;
+ case TX_NOTIFY_PENDING:
+ atomic_inc(&iucv->pendings);
+ break;
+ case TX_NOTIFY_DELAYED_OK:
+- __skb_unlink(this, list);
++ __skb_unlink(list_skb, list);
+ atomic_dec(&iucv->pendings);
+ if (atomic_read(&iucv->pendings) <= 0)
+ iucv_sock_wake_msglim(sk);
+- kfree_skb(this);
++ kfree_skb(list_skb);
+ break;
+ case TX_NOTIFY_UNREACHABLE:
+ case TX_NOTIFY_DELAYED_UNREACHABLE:
+ case TX_NOTIFY_TPQFULL: /* not yet used */
+ case TX_NOTIFY_GENERALERROR:
+ case TX_NOTIFY_DELAYED_GENERALERROR:
+- __skb_unlink(this, list);
+- kfree_skb(this);
+- sk->sk_state = IUCV_DISCONN;
+- sk->sk_state_change(sk);
++ __skb_unlink(list_skb, list);
++ kfree_skb(list_skb);
++ if (sk->sk_state == IUCV_CONNECTED) {
++ sk->sk_state = IUCV_DISCONN;
++ sk->sk_state_change(sk);
++ }
+ break;
+ }
+ break;
+ }
+- list_skb = list_skb->next;
++ list_skb = nskb;
++ nskb = nskb->next;
+ }
++out_unlock:
+ spin_unlock_irqrestore(&list->lock, flags);
+
+-out_unlock:
+- bh_unlock_sock(sk);
++ if (sk->sk_state == IUCV_CLOSING) {
++ if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
++ sk->sk_state = IUCV_CLOSED;
++ sk->sk_state_change(sk);
++ }
++ }
+ }
++
+ static const struct proto_ops iucv_sock_ops = {
+ .family = PF_IUCV,
+ .owner = THIS_MODULE,
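Since iucv_sock_poll() now reports POLLOUT only while the socket is writable and below the unconfirmed-message limit (iucv_below_msglim()), a sender should wait for writability rather than busy-retrying sendmsg(). The sketch below shows that pattern; it is illustrative only and assumes fd is a connected AF_IUCV socket such as the one in the previous example.

/* Illustration only: wait for POLLOUT, which the patched iucv_sock_poll()
 * withholds while the unconfirmed-message limit is reached. */
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t send_when_writable(int fd, const void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int rc = poll(&pfd, 1, 5000 /* ms */);

	if (rc <= 0)
		return -1;                  /* timeout or poll() failure */
	if (!(pfd.revents & POLLOUT))
		return -1;                  /* e.g. peer disconnected */
	/* Each send() becomes one IUCV message internally. */
	return send(fd, buf, len, 0);
}
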
diff --git a/patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch b/patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch
new file mode 100644
index 0000000000..5d44eda549
--- /dev/null
+++ b/patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch
@@ -0,0 +1,57 @@
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: af_iucv: offer new getsockopt SO_MSGSIZE
+Patch-mainline: Yes
+References: bnc#744795,LTC#78348
+
+Symptom: socket program cannot determine maximum size of a message
+Problem: AF_IUCV sockets with HS transport are bound to a certain
+ HiperSockets interface, which is defined with one out of
+ four possible frame / mtu sizes. The maximum message size
+ to send depends on this mtu size, but an AF_IUCV socket
+ program does not have an option to determine this maximum
+ message size in advance.
+Solution: Introduce a new getsockopt option called SO_MSGSIZE.
+
+Acked-by: John Jolly <jjolly@suse.de>
+
+---
+ include/net/iucv/af_iucv.h | 1 +
+ net/iucv/af_iucv.c | 10 +++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -132,6 +132,7 @@ struct iucv_sock {
+ /* iucv socket options (SOL_IUCV) */
+ #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
+ #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
++#define SO_MSGSIZE 0x0800 /* get maximum msgsize */
+
+ /* iucv related control messages (scm) */
+ #define SCM_IUCV_TRGCLS 0x0001 /* target class control message */
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1655,7 +1655,8 @@ static int iucv_sock_getsockopt(struct s
+ {
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+- int val, len;
++ unsigned int val;
++ int len;
+
+ if (level != SOL_IUCV)
+ return -ENOPROTOOPT;
+@@ -1678,6 +1679,13 @@ static int iucv_sock_getsockopt(struct s
+ : iucv->msglimit; /* default */
+ release_sock(sk);
+ break;
++ case SO_MSGSIZE:
++ if (sk->sk_state == IUCV_OPEN)
++ return -EBADFD;
++ val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
++ sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
++ 0x7fffffff;
++ break;
+ default:
+ return -ENOPROTOOPT;
+ }
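A short userspace sketch may help illustrate the new option: once the socket has left the IUCV_OPEN state (the code above returns -EBADFD before bind/connect), an application can query the maximum message size and size its buffers accordingly. SOL_IUCV (277) comes from linux/socket.h, SO_MSGSIZE (0x0800) is the value introduced by this patch, and fd is assumed to be an AF_IUCV socket set up as in the earlier sketches.

/* Illustration only: query the maximum message size added by this patch.
 * Assumes 'fd' is a bound or connected AF_IUCV socket. */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_IUCV
#define SOL_IUCV   277                  /* from linux/socket.h */
#endif
#ifndef SO_MSGSIZE
#define SO_MSGSIZE 0x0800               /* defined by this patch */
#endif

static void print_max_msgsize(int fd)
{
	unsigned int msgsize;
	socklen_t len = sizeof(msgsize);

	if (getsockopt(fd, SOL_IUCV, SO_MSGSIZE, &msgsize, &len) < 0) {
		perror("getsockopt(SO_MSGSIZE)");   /* EBADFD while still IUCV_OPEN */
		return;
	}
	/* HiperSockets transport: device MTU minus af_iucv_trans_hdr and
	 * ETH_HLEN; classic IUCV transport: reported as 0x7fffffff. */
	printf("maximum message size: %u bytes\n", msgsize);
}
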
diff --git a/series.conf b/series.conf
index b1c73dac74..20dfeaf8ad 100644
--- a/series.conf
+++ b/series.conf
@@ -375,6 +375,10 @@
patches.arch/s390-44-01-pgste-update.patch
+ patches.arch/s390-45-01-af_iucv-remove-iucv-path.patch
+ patches.arch/s390-45-02-af_iucv-net-device.patch
+ patches.arch/s390-45-03-af_iucv-getsockopt-msgsize.patch
+
patches.suse/dasd-failfast-expires
patches.suse/dasd-Clarify-comment.patch
patches.suse/dasd-make-number-of-retries-configurable.patch