Commit ff9c18e4 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'ceph-for-6.9-rc1' of https://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:
 "A patch to minimize blockage when processing very large batches of
  dirty caps and two fixes to better handle EOF in the face of multiple
  clients performing reads and size-extending writes at the same time"

* tag 'ceph-for-6.9-rc1' of https://github.com/ceph/ceph-client:
  ceph: set correct cap mask for getattr request for read
  ceph: stop copying to iter at EOF on sync reads
  ceph: remove SLAB_MEM_SPREAD flag usage
  ceph: break the check delayed cap loop every 5s
parents 6f6efce5 825b82f6
...@@ -4634,6 +4634,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) ...@@ -4634,6 +4634,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
iput(inode); iput(inode);
spin_lock(&mdsc->cap_delay_lock); spin_lock(&mdsc->cap_delay_lock);
} }
/*
* Make sure too many dirty caps or general
* slowness doesn't block mdsc delayed work,
* preventing send_renew_caps() from running.
*/
if (jiffies - loop_start >= 5 * HZ)
break;
} }
spin_unlock(&mdsc->cap_delay_lock); spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "done\n"); doutc(cl, "done\n");
......
...@@ -1138,7 +1138,12 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos, ...@@ -1138,7 +1138,12 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
} }
idx = 0; idx = 0;
left = ret > 0 ? ret : 0; if (ret <= 0)
left = 0;
else if (off + ret > i_size)
left = i_size - off;
else
left = ret;
while (left > 0) { while (left > 0) {
size_t plen, copied; size_t plen, copied;
...@@ -1167,15 +1172,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos, ...@@ -1167,15 +1172,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
} }
if (ret > 0) { if (ret > 0) {
if (off > *ki_pos) { if (off >= i_size) {
if (off >= i_size) { *retry_op = CHECK_EOF;
*retry_op = CHECK_EOF; ret = i_size - *ki_pos;
ret = i_size - *ki_pos; *ki_pos = i_size;
*ki_pos = i_size; } else {
} else { ret = off - *ki_pos;
ret = off - *ki_pos; *ki_pos = off;
*ki_pos = off;
}
} }
if (last_objver) if (last_objver)
...@@ -2126,14 +2129,16 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) ...@@ -2126,14 +2129,16 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
int statret; int statret;
struct page *page = NULL; struct page *page = NULL;
loff_t i_size; loff_t i_size;
int mask = CEPH_STAT_CAP_SIZE;
if (retry_op == READ_INLINE) { if (retry_op == READ_INLINE) {
page = __page_cache_alloc(GFP_KERNEL); page = __page_cache_alloc(GFP_KERNEL);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
mask = CEPH_STAT_CAP_INLINE_DATA;
} }
statret = __ceph_do_getattr(inode, page, statret = __ceph_do_getattr(inode, page, mask, !!page);
CEPH_STAT_CAP_INLINE_DATA, !!page);
if (statret < 0) { if (statret < 0) {
if (page) if (page)
__free_page(page); __free_page(page);
...@@ -2174,7 +2179,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) ...@@ -2174,7 +2179,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
/* hit EOF or hole? */ /* hit EOF or hole? */
if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) { ret < len) {
doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n", doutc(cl, "may hit hole, ppos %lld < size %lld, reading more\n",
iocb->ki_pos, i_size); iocb->ki_pos, i_size);
read += ret; read += ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment