Commit c9ad5a65 authored by David S. Miller

Merge branch 'af_iucv-big-bufs'

Ursula Braun says:

====================
s390: af_iucv patches

here are improvements for af_iucv that relax the pressure to allocate
big contiguous kernel buffers.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 818d49ad a006353a
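
Both patches rely on the same split, sketched below as a small, hypothetical userspace C program (PAGE_SIZE, MAX_SKB_FRAGS and the iucv_array size are illustrative placeholders, not the kernel's actual values): messages smaller than a page keep a single linear buffer as before, while larger messages get at most one page of linear data (minus headroom reserved for the iucv_array descriptor list on the classic transport) and the remainder goes into page fragments, so no large contiguous allocation is needed.

	/* Hypothetical sketch of the size split used by the patches;
	 * the constants below are placeholders, not the kernel values.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE	4096UL
	#define MAX_SKB_FRAGS	17	/* placeholder, config dependent */
	#define IUCV_ARRAY_LEN	8	/* placeholder for sizeof(struct iucv_array) */

	static void split_message(unsigned long len)
	{
		size_t headroom, linear;

		if (len < PAGE_SIZE) {
			headroom = 0;
			linear = len;		/* small message: fully linear */
		} else {
			/* reserve headroom for the iucv_array descriptor list */
			headroom = IUCV_ARRAY_LEN * (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
		printf("len=%lu -> headroom=%zu linear=%zu paged=%lu\n",
		       len, headroom, linear, len - linear);
	}

	int main(void)
	{
		split_message(512);	/* stays below one page, fully linear */
		split_message(65536);	/* 64 KiB: one page linear, rest paged */
		return 0;
	}

In the actual patches this split feeds sock_alloc_send_pskb() on the send path and alloc_skb_with_frags() on the receive path, as the diff below shows.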
@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
+	size_t headroom, linear;
 	struct sk_buff *skb;
 	struct iucv_message txmsg = {0};
 	struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb = sock_alloc_send_skb(sk,
-			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
-			noblock, &err);
-	else
-		skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
+		   ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
+	if (headroom + len < PAGE_SIZE) {
+		linear = len;
+	} else {
+		/* In nonlinear "classic" iucv skb,
+		 * reserve space for iucv_array
+		 */
+		if (iucv->transport != AF_IUCV_TRANS_HIPER)
+			headroom += sizeof(struct iucv_array) *
+				    (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
+	}
+	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
+				   noblock, &err, 0);
 	if (!skb)
 		goto out;
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
-	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
-		err = -EFAULT;
+	if (headroom)
+		skb_reserve(skb, headroom);
+	skb_put(skb, linear);
+	skb->len = len;
+	skb->data_len = len - linear;
+	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
+	if (err)
 		goto fail;
-	}
 
 	/* wait if outstanding messages for iucv path has reached */
 	timeo = sock_sndtimeo(sk, noblock);
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			atomic_dec(&iucv->msg_sent);
 			goto fail;
 		}
-		goto release;
-	}
-	skb_queue_tail(&iucv->send_skb_q, skb);
+	} else { /* Classic VM IUCV transport */
+		skb_queue_tail(&iucv->send_skb_q, skb);
 
-	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-	    && skb->len <= 7) {
-		err = iucv_send_iprm(iucv->path, &txmsg, skb);
+		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
+		    skb->len <= 7) {
+			err = iucv_send_iprm(iucv->path, &txmsg, skb);
 
-		/* on success: there is no message_complete callback
-		 * for an IPRMDATA msg; remove skb from send queue */
-		if (err == 0) {
-			skb_unlink(skb, &iucv->send_skb_q);
-			kfree_skb(skb);
-		}
+			/* on success: there is no message_complete callback */
+			/* for an IPRMDATA msg; remove skb from send queue */
+			if (err == 0) {
+				skb_unlink(skb, &iucv->send_skb_q);
+				kfree_skb(skb);
+			}
 
-		/* this error should never happen since the
-		 * IUCV_IPRMDATA path flag is set... sever path */
-		if (err == 0x15) {
-			pr_iucv->path_sever(iucv->path, NULL);
-			skb_unlink(skb, &iucv->send_skb_q);
-			err = -EPIPE;
-			goto fail;
-		}
-	} else
-		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
-					(void *) skb->data, skb->len);
-	if (err) {
-		if (err == 3) {
-			user_id[8] = 0;
-			memcpy(user_id, iucv->dst_user_id, 8);
-			appl_id[8] = 0;
-			memcpy(appl_id, iucv->dst_name, 8);
-			pr_err("Application %s on z/VM guest %s"
-				" exceeds message limit\n",
-				appl_id, user_id);
-			err = -EAGAIN;
-		} else
-			err = -EPIPE;
-		skb_unlink(skb, &iucv->send_skb_q);
-		goto fail;
-	}
+			/* this error should never happen since the */
+			/* IUCV_IPRMDATA path flag is set... sever path */
+			if (err == 0x15) {
+				pr_iucv->path_sever(iucv->path, NULL);
+				skb_unlink(skb, &iucv->send_skb_q);
+				err = -EPIPE;
+				goto fail;
+			}
+		} else if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			/* skip iucv_array lying in the headroom */
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+						    IUCV_IPBUFLST, 0,
+						    (void *)iba, skb->len);
+		} else { /* non-IPRM Linear skb */
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+					0, 0, (void *)skb->data, skb->len);
+		}
+		if (err) {
+			if (err == 3) {
+				user_id[8] = 0;
+				memcpy(user_id, iucv->dst_user_id, 8);
+				appl_id[8] = 0;
+				memcpy(appl_id, iucv->dst_name, 8);
+				pr_err(
+		"Application %s on z/VM guest %s exceeds message limit\n",
+					appl_id, user_id);
+				err = -EAGAIN;
+			} else {
+				err = -EPIPE;
+			}
+			skb_unlink(skb, &iucv->send_skb_q);
+			goto fail;
+		}
+	}
 
-release:
 	release_sock(sk);
 	return len;
@@ -1201,42 +1231,32 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	return err;
 }
 
-/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
- *
- * Locking: must be called with message_q.lock held
- */
-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
 {
-	int dataleft, size, copied = 0;
-	struct sk_buff *nskb;
-
-	dataleft = len;
-	while (dataleft) {
-		if (dataleft >= sk->sk_rcvbuf / 4)
-			size = sk->sk_rcvbuf / 4;
-		else
-			size = dataleft;
-
-		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-		if (!nskb)
-			return -ENOMEM;
-
-		/* copy target class to control buffer of new skb */
-		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
-
-		/* copy data fragment */
-		memcpy(nskb->data, skb->data + copied, size);
-		copied += size;
-		dataleft -= size;
-
-		skb_reset_transport_header(nskb);
-		skb_reset_network_header(nskb);
-		nskb->len = size;
+	size_t headroom, linear;
+	struct sk_buff *skb;
+	int err;
 
-		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+	if (len < PAGE_SIZE) {
+		headroom = 0;
+		linear = len;
+	} else {
+		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
 	}
-
-	return 0;
+	skb = alloc_skb_with_frags(headroom + linear, len - linear,
+				   0, &err, GFP_ATOMIC | GFP_DMA);
+	WARN_ONCE(!skb,
+		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
+		  len, err);
+	if (skb) {
+		if (headroom)
+			skb_reserve(skb, headroom);
+		skb_put(skb, linear);
+		skb->len = len;
+		skb->data_len = len - linear;
+	}
+	return skb;
 }
 
 /* iucv_process_message() - Receive a single outstanding IUCV message
@@ -1263,31 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 			skb->len = 0;
 		}
 	} else {
-		rc = pr_iucv->message_receive(path, msg,
+		if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			rc = pr_iucv->message_receive(path, msg,
+					      IUCV_IPBUFLST,
+					      (void *)iba, len, NULL);
+		} else {
+			rc = pr_iucv->message_receive(path, msg,
 					      msg->flags & IUCV_IPRMDATA,
 					      skb->data, len, NULL);
+		}
 		if (rc) {
 			kfree_skb(skb);
 			return;
 		}
-		/* we need to fragment iucv messages for SOCK_STREAM only;
-		 * for SOCK_SEQPACKET, it is only relevant if we support
-		 * record segmentation using MSG_EOR (see also recvmsg()) */
-		if (sk->sk_type == SOCK_STREAM &&
-		    skb->truesize >= sk->sk_rcvbuf / 4) {
-			rc = iucv_fragment_skb(sk, skb, len);
-			kfree_skb(skb);
-			skb = NULL;
-			if (rc) {
-				pr_iucv->path_sever(path, NULL);
-				return;
-			}
-			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
-		} else {
-			skb_reset_transport_header(skb);
-			skb_reset_network_header(skb);
-			skb->len = len;
-		}
+		WARN_ON_ONCE(skb->len != len);
 	}
 
 	IUCV_SKB_CB(skb)->offset = 0;
@@ -1306,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
 	struct sock_msg_q *p, *n;
 
 	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
-		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
+		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
 		if (!skb)
 			break;
 		iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1801,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 	if (len > sk->sk_rcvbuf)
 		goto save_message;
 
-	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
+	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
 	if (!skb)
 		goto save_message;