Commit c2ff29e9 authored by David Howells, committed by Jakub Kicinski

siw: Inline do_tcp_sendpages()

do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(),
so inline it, allowing do_tcp_sendpages() to be removed.  This is part of
replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
Reviewed-by: Tom Talpey <tom@talpey.com>
cc: Jason Gunthorpe <jgg@ziepe.ca>
cc: Leon Romanovsky <leon@kernel.org>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent e117dcfd
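
The diff below replaces siw's call to do_tcp_sendpages() with the sendmsg-based equivalent. As a minimal standalone sketch of the pattern, assuming a kernel that has MSG_SPLICE_PAGES (the helper name and the surrounding locking are illustrative, not part of the patch):

/*
 * Hypothetical helper showing the MSG_SPLICE_PAGES pattern: a one-entry
 * bio_vec describes the page fragment, the msghdr's iterator is pointed
 * at it, and the whole thing is handed to tcp_sendmsg_locked(), which
 * splices page references into the socket rather than copying the data.
 */
#include <linux/bvec.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <net/sock.h>
#include <net/tcp.h>

static int splice_page_to_tcp(struct sock *sk, struct page *page,
			      int offset, size_t bytes)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
	};
	int rv;

	bvec_set_page(&bvec, page, bytes, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);

	lock_sock(sk);
	rv = tcp_sendmsg_locked(sk, &msg, bytes); /* bytes queued or -errno */
	release_sock(sk);
	return rv;
}
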
@@ -312,7 +312,7 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
 }
 
 /*
- * 0copy TCP transmit interface: Use do_tcp_sendpages.
+ * 0copy TCP transmit interface: Use MSG_SPLICE_PAGES.
  *
  * Using sendpage to push page by page appears to be less efficient
  * than using sendmsg, even if data are copied.
@@ -323,20 +323,27 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
 static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
 			     size_t size)
 {
+	struct bio_vec bvec;
+	struct msghdr msg = {
+		.msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST |
+			      MSG_SPLICE_PAGES),
+	};
 	struct sock *sk = s->sk;
-	int i = 0, rv = 0, sent = 0,
-	    flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST;
+	int i = 0, rv = 0, sent = 0;
 
 	while (size) {
 		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
 
 		if (size + offset <= PAGE_SIZE)
-			flags = MSG_MORE | MSG_DONTWAIT;
+			msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
 
 		tcp_rate_check_app_limited(sk);
+		bvec_set_page(&bvec, page[i], bytes, offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
 try_page_again:
 		lock_sock(sk);
-		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
+		rv = tcp_sendmsg_locked(sk, &msg, size);
 		release_sock(sk);
 
 		if (rv > 0) {
...
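
Two details of the hunk above are easy to miss: the msghdr is initialised once with MSG_SENDPAGE_NOTLAST set and the flag is cleared only once the remaining data fits in the current page, mirroring what the old flags variable did; and because tcp_sendmsg_locked() may queue fewer bytes than requested (returning the count queued, or a negative errno), the try_page_again: label lets the elided tail of the loop retry the current page after a partial send.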