Commit e5374399 authored by Eugene Crosser, committed by David S. Miller

af_iucv: use paged SKBs for big outbound messages

When an outbound message is bigger than a page, allocate and fill
a paged SKB, and subsequently use IUCV send primitive with IPBUFLST
flag. This relaxes the pressure to allocate big contiguous kernel
buffers.
Signed-off-by: Eugene Crosser <Eugene.Crosser@ru.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 818d49ad
...@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk); struct iucv_sock *iucv = iucv_sk(sk);
size_t headroom, linear;
struct sk_buff *skb; struct sk_buff *skb;
struct iucv_message txmsg = {0}; struct iucv_message txmsg = {0};
struct cmsghdr *cmsg; struct cmsghdr *cmsg;
...@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
* this is fine for SOCK_SEQPACKET (unless we want to support * this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but * segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */ * for SOCK_STREAM we might want to improve it in future */
if (iucv->transport == AF_IUCV_TRANS_HIPER) headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
skb = sock_alloc_send_skb(sk, ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, if (headroom + len < PAGE_SIZE) {
noblock, &err); linear = len;
else } else {
skb = sock_alloc_send_skb(sk, len, noblock, &err); /* In nonlinear "classic" iucv skb,
* reserve space for iucv_array
*/
if (iucv->transport != AF_IUCV_TRANS_HIPER)
headroom += sizeof(struct iucv_array) *
(MAX_SKB_FRAGS + 1);
linear = PAGE_SIZE - headroom;
}
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
noblock, &err, 0);
if (!skb) if (!skb)
goto out; goto out;
if (iucv->transport == AF_IUCV_TRANS_HIPER) if (headroom)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); skb_reserve(skb, headroom);
if (memcpy_from_msg(skb_put(skb, len), msg, len)) { skb_put(skb, linear);
err = -EFAULT; skb->len = len;
skb->data_len = len - linear;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
if (err)
goto fail; goto fail;
}
/* wait if outstanding messages for iucv path has reached */ /* wait if outstanding messages for iucv path has reached */
timeo = sock_sndtimeo(sk, noblock); timeo = sock_sndtimeo(sk, noblock);
...@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
atomic_dec(&iucv->msg_sent); atomic_dec(&iucv->msg_sent);
goto fail; goto fail;
} }
goto release; } else { /* Classic VM IUCV transport */
}
skb_queue_tail(&iucv->send_skb_q, skb); skb_queue_tail(&iucv->send_skb_q, skb);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
&& skb->len <= 7) { skb->len <= 7) {
err = iucv_send_iprm(iucv->path, &txmsg, skb); err = iucv_send_iprm(iucv->path, &txmsg, skb);
/* on success: there is no message_complete callback /* on success: there is no message_complete callback */
* for an IPRMDATA msg; remove skb from send queue */ /* for an IPRMDATA msg; remove skb from send queue */
if (err == 0) { if (err == 0) {
skb_unlink(skb, &iucv->send_skb_q); skb_unlink(skb, &iucv->send_skb_q);
kfree_skb(skb); kfree_skb(skb);
} }
/* this error should never happen since the /* this error should never happen since the */
* IUCV_IPRMDATA path flag is set... sever path */ /* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) { if (err == 0x15) {
pr_iucv->path_sever(iucv->path, NULL); pr_iucv->path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q); skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE; err = -EPIPE;
goto fail; goto fail;
} }
} else } else if (skb_is_nonlinear(skb)) {
err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0, struct iucv_array *iba = (struct iucv_array *)skb->head;
(void *) skb->data, skb->len); int i;
/* skip iucv_array lying in the headroom */
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
(u32)(addr_t)skb_frag_address(frag);
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
IUCV_IPBUFLST, 0,
(void *)iba, skb->len);
} else { /* non-IPRM Linear skb */
err = pr_iucv->message_send(iucv->path, &txmsg,
0, 0, (void *)skb->data, skb->len);
}
if (err) { if (err) {
if (err == 3) { if (err == 3) {
user_id[8] = 0; user_id[8] = 0;
memcpy(user_id, iucv->dst_user_id, 8); memcpy(user_id, iucv->dst_user_id, 8);
appl_id[8] = 0; appl_id[8] = 0;
memcpy(appl_id, iucv->dst_name, 8); memcpy(appl_id, iucv->dst_name, 8);
pr_err("Application %s on z/VM guest %s" pr_err(
" exceeds message limit\n", "Application %s on z/VM guest %s exceeds message limit\n",
appl_id, user_id); appl_id, user_id);
err = -EAGAIN; err = -EAGAIN;
} else } else {
err = -EPIPE; err = -EPIPE;
}
skb_unlink(skb, &iucv->send_skb_q); skb_unlink(skb, &iucv->send_skb_q);
goto fail; goto fail;
} }
}
release:
release_sock(sk); release_sock(sk);
return len; return len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment