Commit 4b6c132b authored by Al Viro

iov_iter: switch ..._full() variants of primitives to use of iov_iter_revert()

Use corresponding plain variants, revert on short copy.  That's the way it
should've been done from the very beginning, except that we didn't have
iov_iter_revert() back then...

[fixed another braino caught by Qian Cai <quic_qiancai@quicinc.com>]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 3b3fc051
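
Every ..._full() wrapper below follows the same pattern; as a minimal
sketch (wrapper boilerplate elided, identifiers as in the patch):

	size_t copied = copy_from_iter(addr, bytes, i);	/* plain variant */

	if (likely(copied == bytes))
		return true;			/* copied everything */
	iov_iter_revert(i, copied);		/* short copy: undo the partial advance */
	return false;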
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -132,9 +132,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
 
 static __always_inline __must_check
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
@@ -157,10 +155,11 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return false;
-	else
-		return _copy_from_iter_full(addr, bytes, i);
+	size_t copied = copy_from_iter(addr, bytes, i);
+
+	if (likely(copied == bytes))
+		return true;
+	iov_iter_revert(i, copied);
+	return false;
 }
 
 static __always_inline __must_check
@@ -175,10 +174,11 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return false;
-	else
-		return _copy_from_iter_full_nocache(addr, bytes, i);
+	size_t copied = copy_from_iter_nocache(addr, bytes, i);
+
+	if (likely(copied == bytes))
+		return true;
+	iov_iter_revert(i, copied);
+	return false;
 }
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
@@ -278,7 +278,17 @@ struct csum_state {
 
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+				  __wsum *csum, struct iov_iter *i)
+{
+	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+	if (likely(copied == bytes))
+		return true;
+	iov_iter_revert(i, copied);
+	return false;
+}
 
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 		struct iov_iter *i);
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -819,35 +819,6 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_from_iter);
 
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
-{
-	char *to = addr;
-
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
-		return false;
-	}
-	if (unlikely(i->count < bytes))
-		return false;
-
-	if (iter_is_iovec(i))
-		might_fault();
-	iterate_all_kinds(i, bytes, v, ({
-		if (copyin((to += v.iov_len) - v.iov_len,
-			   v.iov_base, v.iov_len))
-			return false;
-		0;}),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
-	)
-
-	iov_iter_advance(i, bytes);
-	return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full);
-
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
@@ -907,32 +878,6 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 #endif
 
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
-	char *to = addr;
-
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
-		return false;
-	}
-	if (unlikely(i->count < bytes))
-		return false;
-	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
-					     v.iov_base, v.iov_len))
-			return false;
-		0;}),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
-	)
-
-	iov_iter_advance(i, bytes);
-	return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full_nocache);
-
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
 	struct page *head;
@@ -1740,55 +1685,6 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter);
 
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
-				  struct iov_iter *i)
-{
-	char *to = addr;
-	__wsum sum, next;
-	size_t off = 0;
-
-	sum = *csum;
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
-		WARN_ON(1);
-		return false;
-	}
-	if (unlikely(i->count < bytes))
-		return false;
-	iterate_all_kinds(i, bytes, v, ({
-		next = csum_and_copy_from_user(v.iov_base,
-					       (to += v.iov_len) - v.iov_len,
-					       v.iov_len);
-		if (!next)
-			return false;
-		sum = csum_block_add(sum, next, off);
-		off += v.iov_len;
-		0;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
-	}), ({
-		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
-				      v.iov_base, v.iov_len,
-				      sum, off);
-		off += v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
-	})
-	)
-	*csum = sum;
-	iov_iter_advance(i, bytes);
-	return true;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 			     struct iov_iter *i)
 {
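
For callers nothing changes: on failure the iterator is left where it
started, so a (hypothetical) caller can simply bail out:

	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
		return -EFAULT;		/* 'from' has not been advanced */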