author		Enzo Matsumiya <ematsumiya@suse.de>	2025-05-02 17:00:53 -0300
committer	Enzo Matsumiya <ematsumiya@suse.de>	2025-05-02 17:00:53 -0300
commit		f5a8b7d07231adc88b2c19755df08f3aa6846622 (patch)
tree		48320255d00b0c47d20535f261a5ce898abf2746
parent		82a3fee58fd4e1632e5a2fa7206c47741da67dc9 (diff)
smb: client: simplify cached dir invalidation/close
We already have the laundromat thread (to clean up expired cfids) and the cfid_put_wq workqueue to parallel-process cfid invalidation, so remove the invalidation worker. This also allows the removal of the cfids->dying list, which simplifies things even further.

In general, there are fewer processes running, fewer locks taken, and, code-wise, less room for mistakes.

Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
-rw-r--r--	fs/smb/client/cached_dir.c	225
-rw-r--r--	fs/smb/client/cached_dir.h	10
2 files changed, 75 insertions(+), 160 deletions(-)
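For orientation before the diff: a condensed sketch of the invalidation path as it looks after this patch. It is distilled from the hunks below, not verbatim; tracing and error paths are elided. The point is that teardown now happens inline, under the existing laundromat/put-workqueue infrastructure, instead of being parked on cfids->dying for a dedicated worker:

	void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
	{
		struct cached_fids *cfids = tcon->cfids;
		struct cached_fid *cfid, *q;
		LIST_HEAD(aux);

		if (cfids == NULL)
			return;

		/* sentinel: open_cached_dir() checks num_entries against
		 * tcon->max_cached_dirs, so UINT_MAX blocks new entries */
		cfids->num_entries = UINT_MAX;

		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
			list_move(&cfid->entry, &aux);
			cfid->on_list = false;
			cfid->has_lease = false;
			cfid->is_open = false;
		}
		spin_unlock(&cfids->cfid_list_lock);

		/* free outside the spinlock; dput() inside free_cached_dir()
		 * can sleep */
		list_for_each_entry_safe(cfid, q, &aux, entry) {
			list_del(&cfid->entry);
			free_cached_dir(cfid);
		}
		cfids->num_entries = 0;
	}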
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 19b06b875a91..d95f14975775 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -187,7 +187,6 @@ find_again:
if (cfid->has_lease) {
if (cfid->time) {
*ret_cfid = cfid;
- kfree(utf16_path);
return 0;
}
@@ -204,31 +203,37 @@ find_again:
/*
* cfid for @path not found or gone, alloc/open a new one.
+ *
* Differentiate between "can't add to list" and "no memory to alloc" so callers handle it
* properly.
*/
+ cfid = alloc_cached_dir(cfids, path);
+ if (!cfid)
+ return -ENOMEM;
+replay_again:
spin_lock(&cfids->cfid_list_lock);
if (cfids->num_entries >= tcon->max_cached_dirs) {
spin_unlock(&cfids->cfid_list_lock);
- return -ENOENT;
+ rc = -ENOENT;
+ goto out;
}
spin_unlock(&cfids->cfid_list_lock);
- cfid = alloc_cached_dir(cfids, path);
- if (!cfid)
- return -ENOMEM;
-replay_again:
/* reinitialize for possible replay */
flags = 0;
oplock = SMB2_OPLOCK_LEVEL_II;
server = cifs_pick_channel(ses);
- if (!server->ops->new_lease_key)
- return -EIO;
+ if (!server->ops->new_lease_key) {
+ rc = -EIO;
+ goto out;
+ }
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
- if (!utf16_path)
- return -ENOMEM;
+ if (!utf16_path) {
+ rc = -ENOMEM;
+ goto out;
+ }
/*
* Skip any prefix paths in @path as lookup_positive_unlocked() ends up
@@ -368,7 +373,14 @@ oshr_free:
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
- if (rc) {
+ if (!rc) {
+ /* At this point the directory handle is fully cached */
+ cfid->time = jiffies;
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+ } else {
+ kfree(utf16_path);
+
spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease) {
/*
@@ -382,17 +394,11 @@ out:
spin_unlock(&cfids->cfid_list_lock);
kref_put(&cfid->refcount, smb2_close_cached_fid);
- } else {
- /* At this point the directory handle is fully cached */
- cfid->time = jiffies;
- *ret_cfid = cfid;
- atomic_inc(&tcon->num_remote_opens);
- }
- kfree(utf16_path);
- if (is_replayable_error(rc) &&
- smb2_should_replay(tcon, &retries, &cur_sleep))
- goto replay_again;
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+ }
return rc;
}
@@ -486,8 +492,8 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cached_fids *cfids;
- struct cached_dir_dentry *tmp_list, *q;
- LIST_HEAD(entry);
+ struct cached_dir_dentry *cd, *q;
+ LIST_HEAD(cached_dentries);
spin_lock(&cifs_sb->tlink_tree_lock);
for (node = rb_first(root); node; node = rb_next(node)) {
@@ -495,29 +501,32 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
tcon = tlink_tcon(tlink);
if (IS_ERR(tcon))
continue;
+
cfids = tcon->cfids;
- if (cfids == NULL)
+ if (!cfids)
continue;
+
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
- tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
- if (tmp_list == NULL)
+ cd = kzalloc(sizeof(*cd), GFP_ATOMIC);
+ if (!cd)
break;
+
spin_lock(&cfid->fid_lock);
- tmp_list->dentry = cfid->dentry;
+ cd->dentry = cfid->dentry;
cfid->dentry = NULL;
spin_unlock(&cfid->fid_lock);
- list_add_tail(&tmp_list->entry, &entry);
+ list_add_tail(&cd->entry, &cached_dentries);
}
spin_unlock(&cfids->cfid_list_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
- list_for_each_entry_safe(tmp_list, q, &entry, entry) {
- list_del(&tmp_list->entry);
- dput(tmp_list->dentry);
- kfree(tmp_list);
+ list_for_each_entry_safe(cd, q, &cached_dentries, entry) {
+ list_del(&cd->entry);
+ dput(cd->dentry);
+ kfree(cd);
}
/* Flush any pending work that will drop dentries */
@@ -532,49 +541,30 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
struct cached_fids *cfids = tcon->cfids;
struct cached_fid *cfid, *q;
+ LIST_HEAD(aux);
if (cfids == NULL)
return;
- /*
- * Mark all the cfids as closed, and move them to the cfids->dying list.
- * They'll be cleaned up later by cfids_invalidation_worker. Take
- * a reference to each cfid during this process.
- */
+ /* Prevent any further additions to the list */
+ cfids->num_entries = UINT_MAX;
+
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- list_move(&cfid->entry, &cfids->dying);
- cfids->num_entries--;
- cfid->is_open = false;
+ list_move(&cfid->entry, &aux);
+
cfid->on_list = false;
- if (cfid->has_lease) {
- /*
- * The lease was never cancelled from the server,
- * so steal that reference.
- */
- cfid->has_lease = false;
- } else
- kref_get(&cfid->refcount);
+ cfid->has_lease = false;
+ cfid->is_open = false;
}
- /*
- * Queue dropping of the dentries once locks have been dropped
- */
- if (!list_empty(&cfids->dying))
- queue_work(cfid_put_wq, &cfids->invalidation_work);
spin_unlock(&cfids->cfid_list_lock);
-}
-static void
-cached_dir_offload_close(struct work_struct *work)
-{
- struct cached_fid *cfid = container_of(work,
- struct cached_fid, close_work);
- struct cifs_tcon *tcon = cfid->tcon;
-
- WARN_ON(cfid->on_list);
+ list_for_each_entry_safe(cfid, q, &aux, entry) {
+ list_del(&cfid->entry);
+ free_cached_dir(cfid);
+ }
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
+ cfids->num_entries = 0;
}
/*
@@ -586,17 +576,11 @@ cached_dir_offload_close(struct work_struct *work)
*/
static void cached_dir_put_work(struct work_struct *work)
{
- struct cached_fid *cfid = container_of(work, struct cached_fid,
- put_work);
- struct dentry *dentry;
-
- spin_lock(&cfid->fid_lock);
- dentry = cfid->dentry;
- cfid->dentry = NULL;
- spin_unlock(&cfid->fid_lock);
+ struct cached_fid *cfid = container_of(work, struct cached_fid, put_work);
+ struct cifs_tcon *tcon = cfid->tcon;
- dput(dentry);
- queue_work(serverclose_wq, &cfid->close_work);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
@@ -648,7 +632,6 @@ static struct cached_fid *init_cached_dir(const char *path)
return NULL;
}
- INIT_WORK(&cfid->close_work, cached_dir_offload_close);
INIT_WORK(&cfid->put_work, cached_dir_put_work);
INIT_LIST_HEAD(&cfid->entry);
INIT_LIST_HEAD(&cfid->dirents.entries);
@@ -662,7 +645,6 @@ static void free_cached_dir(struct cached_fid *cfid)
{
struct cached_dirent *dirent, *q;
- WARN_ON(work_pending(&cfid->close_work));
WARN_ON(work_pending(&cfid->put_work));
dput(cfid->dentry);
@@ -682,78 +664,31 @@ static void free_cached_dir(struct cached_fid *cfid)
kfree(cfid);
}
-static void cfids_invalidation_worker(struct work_struct *work)
-{
- struct cached_fids *cfids = container_of(work, struct cached_fids,
- invalidation_work);
- struct cached_fid *cfid, *q;
- LIST_HEAD(entry);
-
- spin_lock(&cfids->cfid_list_lock);
- /* move cfids->dying to the local list */
- list_cut_before(&entry, &cfids->dying, &cfids->dying);
- spin_unlock(&cfids->cfid_list_lock);
-
- list_for_each_entry_safe(cfid, q, &entry, entry) {
- list_del(&cfid->entry);
- /* Drop the ref-count acquired in invalidate_all_cached_dirs */
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
-}
-
static void cfids_laundromat_worker(struct work_struct *work)
{
struct cached_fids *cfids;
struct cached_fid *cfid, *q;
- struct dentry *dentry;
- LIST_HEAD(entry);
cfids = container_of(work, struct cached_fids, laundromat_work.work);
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- if (cfid->time &&
- time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
- cfid->on_list = false;
- list_move(&cfid->entry, &entry);
- cfids->num_entries--;
- if (cfid->has_lease) {
- /*
- * Our lease has not yet been cancelled from the
- * server. Steal that reference.
- */
- cfid->has_lease = false;
- } else
- kref_get(&cfid->refcount);
- }
- }
- spin_unlock(&cfids->cfid_list_lock);
-
- list_for_each_entry_safe(cfid, q, &entry, entry) {
- list_del(&cfid->entry);
+ /* if cfid doesn't have a lease anymore, let whoever set it to false handle it */
+ if (!cfid->has_lease)
+ continue;
- spin_lock(&cfid->fid_lock);
- dentry = cfid->dentry;
- cfid->dentry = NULL;
- spin_unlock(&cfid->fid_lock);
+ if (cfid->time && time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
+ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
- dput(dentry);
- if (cfid->is_open) {
- spin_lock(&cifs_tcp_ses_lock);
- ++cfid->tcon->tc_count;
- trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
- netfs_trace_tcon_ref_get_cached_laundromat);
- spin_unlock(&cifs_tcp_ses_lock);
- queue_work(serverclose_wq, &cfid->close_work);
- } else
- /*
- * Drop the ref-count from above, either the lease-ref (if there
- * was one) or the extra one acquired.
- */
kref_put(&cfid->refcount, smb2_close_cached_fid);
+
+ spin_lock(&cfids->cfid_list_lock);
+ }
}
- queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
- dir_cache_timeout * HZ);
+ spin_unlock(&cfids->cfid_list_lock);
+
+ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work, dir_cache_timeout * HZ);
}
struct cached_fids *init_cached_dirs(void)
@@ -763,11 +698,10 @@ struct cached_fids *init_cached_dirs(void)
cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
if (!cfids)
return NULL;
+
spin_lock_init(&cfids->cfid_list_lock);
INIT_LIST_HEAD(&cfids->entries);
- INIT_LIST_HEAD(&cfids->dying);
- INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
dir_cache_timeout * HZ);
@@ -787,23 +721,12 @@ void free_cached_dirs(struct cached_fids *cfids)
if (cfids == NULL)
return;
+ /* Prevent any further additions to the list (for safety, and we can run this unlocked) */
+ cfids->num_entries = UINT_MAX;
+
cancel_delayed_work_sync(&cfids->laundromat_work);
- cancel_work_sync(&cfids->invalidation_work);
- spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- cfid->on_list = false;
- cfid->is_open = false;
- list_move(&cfid->entry, &entry);
- }
- list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
- cfid->on_list = false;
- cfid->is_open = false;
- list_move(&cfid->entry, &entry);
- }
- spin_unlock(&cfids->cfid_list_lock);
-
- list_for_each_entry_safe(cfid, q, &entry, entry) {
list_del(&cfid->entry);
free_cached_dir(cfid);
}
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 1dfe79d947a6..b047a13ed718 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -7,8 +7,6 @@
#ifndef _CACHED_DIR_H
#define _CACHED_DIR_H
-
-
struct cached_dirent {
struct list_head entry;
char *name;
@@ -45,22 +43,16 @@ struct cached_fid {
struct cifs_tcon *tcon;
struct dentry *dentry;
struct work_struct put_work;
- struct work_struct close_work;
struct smb2_file_all_info file_all_info;
struct cached_dirents dirents;
};
/* default MAX_CACHED_FIDS is 16 */
struct cached_fids {
- /* Must be held when:
- * - accessing the cfids->entries list
- * - accessing the cfids->dying list
- */
+ /* Must be held when accessing the cfids->entries list */
spinlock_t cfid_list_lock;
int num_entries;
struct list_head entries;
- struct list_head dying;
- struct work_struct invalidation_work;
struct delayed_work laundromat_work;
};
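For context, a hypothetical call site showing the lifetime contract this patch leaves unchanged: a successful open returns a referenced cfid that the caller drops with close_cached_dir(). The open_cached_dir() signature below is assumed from the surrounding code, not part of this diff:

	/* hypothetical helper; xid/tcon/cifs_sb come from the caller's context */
	static void example_use_cached_root(unsigned int xid, struct cifs_tcon *tcon,
					    struct cifs_sb_info *cifs_sb)
	{
		struct cached_fid *cfid = NULL;

		if (open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid) == 0) {
			/* ... use cfid->fid for compounded ops on the dir ... */
			close_cached_dir(cfid);	/* drop the caller's reference */
		}
	}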