Commit 2099306c authored by Linus Torvalds

Merge tag '6.7-rc4-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:
 "Six smb3 client fixes:

   - Fixes for copy_file_range and clone (cache invalidation and file
     size), which also address an xfstest failure

   - Fix to return the proper error if REMAP_FILE_DEDUP is set (also
     fixes xfstest generic/304)

   - Fix potential NULL pointer dereference with DFS

   - Multichannel fix (reverting an earlier patch) addressing some of
     the problems with enabling/disabling channels dynamically

  Still working on a follow-on multichannel fix to address another issue
  found in reconnect testing; that will be sent next week"

* tag '6.7-rc4-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: reconnect worker should take reference on server struct unconditionally
  Revert "cifs: reconnect work should have reference on server struct"
  cifs: Fix non-availability of dedup breaking generic/304
  smb: client: fix potential NULL deref in parse_dfs_referrals()
  cifs: Fix flushing, invalidation and file size with FICLONE
  cifs: Fix flushing, invalidation and file size with copy_file_range()
parents f2e8a57e 04909192
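For context, the clone and copy fixes below sit behind the regular VFS entry points, so from userspace they are exercised through ioctl(FICLONE) and copy_file_range(). The following is a minimal sketch of such a caller on a cifs mount — the paths are hypothetical and error handling is trimmed — loosely the kind of operation the affected xfstests exercise.

```c
/* Minimal userspace sketch: clone one file into another with FICLONE and
 * then do a server-side copy with copy_file_range().  Paths are hypothetical;
 * on a cifs mount these calls reach cifs_remap_file_range() and
 * cifs_file_copychunk_range() respectively (via the VFS).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/fs.h>		/* FICLONE */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int src = open("/mnt/cifs/src.dat", O_RDONLY);
	int dst = open("/mnt/cifs/dst.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}

	/* Whole-file clone (reflink); on SMB3 servers that support it the
	 * duplication happens server-side.
	 */
	if (ioctl(dst, FICLONE, src) < 0)
		perror("FICLONE");

	/* Server-side copy of up to 1 MiB using the current file offsets. */
	ssize_t n = copy_file_range(src, NULL, dst, NULL, 1024 * 1024, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);

	close(src);
	close(dst);
	return 0;
}
```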
@@ -1196,32 +1196,103 @@ const struct inode_operations cifs_symlink_inode_ops = {
 	.listxattr = cifs_listxattr,
 };

+/*
+ * Advance the EOF marker to after the source range.
+ */
+static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
+				struct cifs_tcon *src_tcon,
+				unsigned int xid, loff_t src_end)
+{
+	struct cifsFileInfo *writeable_srcfile;
+	int rc = -EINVAL;
+
+	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
+	if (writeable_srcfile) {
+		if (src_tcon->ses->server->ops->set_file_size)
+			rc = src_tcon->ses->server->ops->set_file_size(
+				xid, src_tcon, writeable_srcfile,
+				src_inode->i_size, true /* no need to set sparse */);
+		else
+			rc = -ENOSYS;
+		cifsFileInfo_put(writeable_srcfile);
+		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
+	}
+
+	if (rc < 0)
+		goto set_failed;
+
+	netfs_resize_file(&src_cifsi->netfs, src_end);
+	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
+	return 0;
+
+set_failed:
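+	/* Couldn't move the EOF; fall back to flushing the source's dirty
+	 * pagecache so that its buffered data at least reaches the server.
+	 */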
+	return filemap_write_and_wait(src_inode->i_mapping);
+}
+
+/*
+ * Flush out either the folio that overlaps the beginning of a range in which
+ * pos resides or the folio that overlaps the end of a range unless that folio
+ * is entirely within the range we're going to invalidate.  We extend the flush
+ * bounds to encompass the folio.
+ */
+static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
+			    bool first)
+{
+	struct folio *folio;
+	unsigned long long fpos, fend;
+	pgoff_t index = pos / PAGE_SIZE;
+	size_t size;
+	int rc = 0;
+
+	folio = filemap_get_folio(inode->i_mapping, index);
+	if (IS_ERR(folio))
+		return 0;
+
+	size = folio_size(folio);
+	fpos = folio_pos(folio);
+	fend = fpos + size - 1;
+	*_fstart = min_t(unsigned long long, *_fstart, fpos);
+	*_fend = max_t(unsigned long long, *_fend, fend);
+	if ((first && pos == fpos) || (!first && pos == fend))
+		goto out;
+
+	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
+out:
+	folio_put(folio);
+	return rc;
+}
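
The boundary handling in cifs_flush_folio() is easier to see with concrete numbers. Below is a small, self-contained userspace sketch — not kernel code: it assumes fixed 4 KiB folios and the helper name flush_bounds is invented — of how the eventual invalidation window is widened to folio boundaries and when a partial folio actually needs flushing.

```c
/* Userspace sketch of the flush-window arithmetic in cifs_flush_folio().
 * Assumes 4 KiB "folios"; real folios may be larger and are looked up in
 * the page cache, so this only illustrates the boundary maths.
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLIO_SIZE 4096ULL

struct window { unsigned long long fstart, fend; bool need_flush; };

static void flush_bounds(unsigned long long pos, bool first, struct window *w)
{
	unsigned long long fpos = pos / FOLIO_SIZE * FOLIO_SIZE;	/* like folio_pos() */
	unsigned long long fend = fpos + FOLIO_SIZE - 1;		/* last byte of folio */

	/* Widen the eventual truncate window to whole-folio boundaries. */
	if (fpos < w->fstart)
		w->fstart = fpos;
	if (fend > w->fend)
		w->fend = fend;

	/* If the range edge lands exactly on the folio edge, the folio lies
	 * entirely inside the range being invalidated: no flush needed.
	 */
	w->need_flush = !((first && pos == fpos) || (!first && pos == fend));
}

int main(void)
{
	/* Example: destination range [1000, 9191] (destoff = 1000, len = 8192). */
	unsigned long long destoff = 1000, destend = destoff + 8192 - 1;
	struct window w = { .fstart = destoff, .fend = destend };

	flush_bounds(destoff, true, &w);
	printf("start folio: flush=%d window=[%llu,%llu]\n", w.need_flush, w.fstart, w.fend);
	flush_bounds(destend, false, &w);
	printf("end folio:   flush=%d window=[%llu,%llu]\n", w.need_flush, w.fstart, w.fend);
	return 0;
}
```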
 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
 		struct file *dst_file, loff_t destoff, loff_t len,
 		unsigned int remap_flags)
 {
 	struct inode *src_inode = file_inode(src_file);
 	struct inode *target_inode = file_inode(dst_file);
+	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
 	struct cifsFileInfo *smb_file_src = src_file->private_data;
-	struct cifsFileInfo *smb_file_target;
-	struct cifs_tcon *target_tcon;
+	struct cifsFileInfo *smb_file_target = dst_file->private_data;
+	struct cifs_tcon *target_tcon, *src_tcon;
+	unsigned long long destend, fstart, fend, new_size;
 	unsigned int xid;
 	int rc;

-	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+	if (remap_flags & REMAP_FILE_DEDUP)
+		return -EOPNOTSUPP;
+	if (remap_flags & ~REMAP_FILE_ADVISORY)
 		return -EINVAL;

 	cifs_dbg(FYI, "clone range\n");

 	xid = get_xid();

-	if (!src_file->private_data || !dst_file->private_data) {
+	if (!smb_file_src || !smb_file_target) {
 		rc = -EBADF;
 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
 		goto out;
 	}

-	smb_file_target = dst_file->private_data;
+	src_tcon = tlink_tcon(smb_file_src->tlink);
 	target_tcon = tlink_tcon(smb_file_target->tlink);

 	/*
@@ -1234,20 +1305,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
 	if (len == 0)
 		len = src_inode->i_size - off;

-	cifs_dbg(FYI, "about to flush pages\n");
-	/* should we flush first and last page first */
-	truncate_inode_pages_range(&target_inode->i_data, destoff,
-				   PAGE_ALIGN(destoff + len)-1);
+	cifs_dbg(FYI, "clone range\n");
+
+	/* Flush the source buffer */
+	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+					  off + len - 1);
+	if (rc)
+		goto unlock;
+
+	/* The server-side copy will fail if the source crosses the EOF marker.
+	 * Advance the EOF marker after the flush above to the end of the range
+	 * if it's short of that.
+	 */
+	if (src_cifsi->netfs.remote_i_size < off + len) {
+		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	new_size = destoff + len;
+	destend = destoff + len - 1;
+
+	/* Flush the folios at either end of the destination range to prevent
+	 * accidental loss of dirty data outside of the range.
+	 */
+	fstart = destoff;
+	fend = destend;
+	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+	if (rc)
+		goto unlock;
+	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+	if (rc)
+		goto unlock;
+
+	/* Discard all the folios that overlap the destination region. */
+	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
+	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+			   i_size_read(target_inode), 0);

-	if (target_tcon->ses->server->ops->duplicate_extents)
+	rc = -EOPNOTSUPP;
+	if (target_tcon->ses->server->ops->duplicate_extents) {
 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
 			smb_file_src, smb_file_target, off, len, destoff);
-	else
-		rc = -EOPNOTSUPP;
+		if (rc == 0 && new_size > i_size_read(target_inode)) {
+			truncate_setsize(target_inode, new_size);
+			netfs_resize_file(&target_cifsi->netfs, new_size);
+			fscache_resize_cookie(cifs_inode_cookie(target_inode),
+					      new_size);
+		}
+	}

 	/* force revalidate of size and timestamps of target file now
 	   that target is updated on the server */
 	CIFS_I(target_inode)->time = 0;
+
+unlock:
 	/* although unlocking in the reverse order from locking is not
 	   strictly necessary here it is a little cleaner to be consistent */
 	unlock_two_nondirectories(src_inode, target_inode);
@@ -1263,10 +1377,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
 {
 	struct inode *src_inode = file_inode(src_file);
 	struct inode *target_inode = file_inode(dst_file);
+	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
 	struct cifsFileInfo *smb_file_src;
 	struct cifsFileInfo *smb_file_target;
 	struct cifs_tcon *src_tcon;
 	struct cifs_tcon *target_tcon;
+	unsigned long long destend, fstart, fend;
 	ssize_t rc;

 	cifs_dbg(FYI, "copychunk range\n");
@@ -1306,13 +1422,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
 	if (rc)
 		goto unlock;

-	/* should we flush first and last page first */
-	truncate_inode_pages(&target_inode->i_data, 0);
+	/* The server-side copy will fail if the source crosses the EOF marker.
+	 * Advance the EOF marker after the flush above to the end of the range
+	 * if it's short of that.
+	 */
+	if (src_cifsi->server_eof < off + len) {
+		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	destend = destoff + len - 1;
+
+	/* Flush the folios at either end of the destination range to prevent
+	 * accidental loss of dirty data outside of the range.
+	 */
+	fstart = destoff;
+	fend = destend;
+	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+	if (rc)
+		goto unlock;
+	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+	if (rc)
+		goto unlock;
+
+	/* Discard all the folios that overlap the destination region. */
+	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

 	rc = file_modified(dst_file);
-	if (!rc)
+	if (!rc) {
 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
 				smb_file_src, smb_file_target, off, len, destoff);
+		if (rc > 0 && destoff + rc > i_size_read(target_inode))
+			truncate_setsize(target_inode, destoff + rc);
+	}

 	file_accessed(src_file);
...
@@ -402,13 +402,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 		spin_unlock(&server->srv_lock);
 		cifs_swn_reset_server_dstaddr(server);
 		cifs_server_unlock(server);
-		/* increase ref count which reconnect work will drop */
-		spin_lock(&cifs_tcp_ses_lock);
-		server->srv_count++;
-		spin_unlock(&cifs_tcp_ses_lock);
-
-		if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-			cifs_put_tcp_session(server, false);
+		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 	}
 } while (server->tcpStatus == CifsNeedReconnect);
@@ -538,13 +532,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
 		spin_unlock(&server->srv_lock);
 		cifs_swn_reset_server_dstaddr(server);
 		cifs_server_unlock(server);
-		/* increase ref count which reconnect work will drop */
-		spin_lock(&cifs_tcp_ses_lock);
-		server->srv_count++;
-		spin_unlock(&cifs_tcp_ses_lock);
-
-		if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-			cifs_put_tcp_session(server, false);
+		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 	} while (server->tcpStatus == CifsNeedReconnect);

 	mutex_lock(&server->refpath_lock);
@@ -1620,25 +1608,22 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
 	list_del_init(&server->tcp_ses_list);
 	spin_unlock(&cifs_tcp_ses_lock);

-	/* For secondary channels, we pick up ref-count on the primary server */
-	if (SERVER_IS_CHAN(server))
-		cifs_put_tcp_session(server->primary_server, from_reconnect);
-
 	cancel_delayed_work_sync(&server->echo);

-	if (from_reconnect) {
+	if (from_reconnect)
 		/*
 		 * Avoid deadlock here: reconnect work calls
 		 * cifs_put_tcp_session() at its end. Need to be sure
 		 * that reconnect work does nothing with server pointer after
 		 * that step.
 		 */
-		if (cancel_delayed_work(&server->reconnect))
-			cifs_put_tcp_session(server, from_reconnect);
-	} else {
-		if (cancel_delayed_work_sync(&server->reconnect))
-			cifs_put_tcp_session(server, from_reconnect);
-	}
+		cancel_delayed_work(&server->reconnect);
+	else
+		cancel_delayed_work_sync(&server->reconnect);
+
+	/* For secondary channels, we pick up ref-count on the primary server */
+	if (SERVER_IS_CHAN(server))
+		cifs_put_tcp_session(server->primary_server, from_reconnect);

 	spin_lock(&server->srv_lock);
 	server->tcpStatus = CifsExiting;
...
@@ -2836,6 +2836,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
 		usleep_range(512, 2048);
 	} while (++retry_count < 5);

+	if (!rc && !dfs_rsp)
+		rc = -EIO;
 	if (rc) {
 		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
 			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
...
@@ -158,7 +158,7 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
-	       struct TCP_Server_Info *server)
+	       struct TCP_Server_Info *server, bool from_reconnect)
 {
 	int rc = 0;
 	struct nls_table *nls_codepage = NULL;
@@ -331,7 +331,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
 			 * as cifs_put_tcp_session takes a higher lock
 			 * i.e. cifs_tcp_ses_lock
 			 */
-			cifs_put_tcp_session(server, 1);
+			cifs_put_tcp_session(server, from_reconnect);
 			server->terminate = true;
 			cifs_signal_cifsd_for_reconnect(server, false);
@@ -499,7 +499,7 @@ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
 {
 	int rc;

-	rc = smb2_reconnect(smb2_command, tcon, server);
+	rc = smb2_reconnect(smb2_command, tcon, server, false);
 	if (rc)
 		return rc;
@@ -3895,6 +3895,15 @@ void smb2_reconnect_server(struct work_struct *work)
 	int rc;
 	bool resched = false;

+	/* first check if ref count has reached 0, if not inc ref count */
+	spin_lock(&cifs_tcp_ses_lock);
+	if (!server->srv_count) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return;
+	}
+	server->srv_count++;
+	spin_unlock(&cifs_tcp_ses_lock);
+
 	/* If server is a channel, select the primary channel */
 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
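
The block added above is the heart of the "reconnect worker should take reference on server struct unconditionally" fix: instead of every queuer bumping srv_count before scheduling the work (the code removed from __cifs_reconnect(), reconnect_dfs_server() and SMB2_echo()), the worker itself takes a reference when it starts and bails out if the count has already dropped to zero, in the spirit of refcount_inc_not_zero(). A rough userspace analogy of that pattern is sketched below, using C11 atomics and invented names (server_obj, obj_get_unless_zero); it only illustrates the refcounting discipline, not the kernel's actual locking, which uses cifs_tcp_ses_lock as shown above.

```c
/* Illustrative userspace analogy of "the worker takes its own reference":
 * the code that schedules the work does not touch the refcount; the worker
 * grabs a reference on entry (unless the object is already dying) and drops
 * it when done.  Names here are invented for the sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct server_obj {
	atomic_int refcount;	/* analogous to server->srv_count */
};

static bool obj_get_unless_zero(struct server_obj *s)
{
	int old = atomic_load(&s->refcount);

	/* Only take a reference while the object is still live (count > 0). */
	while (old > 0) {
		if (atomic_compare_exchange_weak(&s->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static void obj_put(struct server_obj *s)
{
	if (atomic_fetch_sub(&s->refcount, 1) == 1) {
		printf("last reference dropped, freeing object\n");
		free(s);
	}
}

static void reconnect_worker(struct server_obj *s)
{
	if (!obj_get_unless_zero(s))
		return;		/* object already being torn down: do nothing */

	printf("worker running with its own reference\n");
	/* ... do the reconnect work ... */

	obj_put(s);		/* worker drops the reference it took */
}

int main(void)
{
	struct server_obj *s = malloc(sizeof(*s));

	atomic_init(&s->refcount, 1);	/* creator's reference */
	reconnect_worker(s);		/* the queuing side did not bump the count */
	obj_put(s);			/* creator's final put frees the object */
	return 0;
}
```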
@@ -3952,11 +3961,10 @@ void smb2_reconnect_server(struct work_struct *work)
 		}
 		spin_unlock(&ses->chan_lock);
 	}
 	spin_unlock(&cifs_tcp_ses_lock);

 	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
 		if (!rc)
 			cifs_reopen_persistent_handles(tcon);
 		else
@@ -3989,7 +3997,7 @@ void smb2_reconnect_server(struct work_struct *work)
 	/* now reconnect sessions for necessary channels */
 	list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
 		tcon->ses = ses;
-		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
 		if (rc)
 			resched = true;
 		list_del_init(&ses->rlist);
@@ -3999,13 +4007,8 @@ void smb2_reconnect_server(struct work_struct *work)
 done:
 	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
-	if (resched) {
+	if (resched)
 		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
-		mutex_unlock(&pserver->reconnect_mutex);
-		/* no need to put tcp session as we're retrying */
-		return;
-	}
 	mutex_unlock(&pserver->reconnect_mutex);

 	/* now we can safely release srv struct */
@@ -4029,12 +4032,7 @@ SMB2_echo(struct TCP_Server_Info *server)
 	    server->ops->need_neg(server)) {
 		spin_unlock(&server->srv_lock);
 		/* No need to send echo on newly established connections */
-		spin_lock(&cifs_tcp_ses_lock);
-		server->srv_count++;
-		spin_unlock(&cifs_tcp_ses_lock);
-
-		if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
-			cifs_put_tcp_session(server, false);
+		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 		return rc;
 	}
 	spin_unlock(&server->srv_lock);
...