summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEnzo Matsumiya <ematsumiya@suse.de>2025-04-01 09:28:47 -0300
committerEnzo Matsumiya <ematsumiya@suse.de>2025-04-01 09:28:47 -0300
commit19688fc9e24f4c9be2a6841b37065dead642180f (patch)
tree589d958bdb698858bdef8d945b48e787467b831e
parentde4c67c30cdc6cd8814e790486f044a102ed0f92 (diff)
downloadlinux-19688fc9e24f4c9be2a6841b37065dead642180f.tar.gz
linux-19688fc9e24f4c9be2a6841b37065dead642180f.tar.bz2
linux-19688fc9e24f4c9be2a6841b37065dead642180f.zip
smb: client: cdir 2025-04-01 WIP
Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
-rw-r--r--fs/smb/client/cached_dir.c1145
-rw-r--r--fs/smb/client/cached_dir.h76
-rw-r--r--fs/smb/client/cifs_debug.c8
-rw-r--r--fs/smb/client/cifsfs.c32
-rw-r--r--fs/smb/client/connect.c3
-rw-r--r--fs/smb/client/dir.c2
-rw-r--r--fs/smb/client/file.c2
-rw-r--r--fs/smb/client/inode.c12
-rw-r--r--fs/smb/client/misc.c5
-rw-r--r--fs/smb/client/readdir.c73
-rw-r--r--fs/smb/client/smb2inode.c55
-rw-r--r--fs/smb/client/smb2ops.c37
-rw-r--r--fs/smb/client/smb2pdu.c2
13 files changed, 946 insertions, 506 deletions
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index ed8bd00dee97..ddc8e7bd1fd8 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -13,24 +13,145 @@
#include "smb2pdu.h"
#include "cifs_debug.h"
-#define CDIR_DEBUG
+#define CDIR_FIND_PATH 0x1
+#define CDIR_FIND_DENTRY 0x2
+#define CDIR_FIND_LEASEKEY 0x4
+#define CDIR_FIND_PTR 0x8
-#ifdef CDIR_DEBUG
-#define cdir_dbg(fmt, ...) pr_err(fmt, __VA_ARGS__)
-#else
-#define cdir_dbg(fmt, ...)
-#endif
-//static void cdir_release(struct kref *ref);
-//static inline void cdir_invalidate(struct cached_dir_internal *cdir);
-static void cdir_remote_close(struct cached_dirs *cdirs, unsigned long long pfid, unsigned long long vfid);
+static struct cached_lease_break *check_lease_break(struct cached_dirs *cdirs, u8 *lease_key)
+{
+ struct cached_lease_break *lb;
+
+ if (!cdirs || !lease_key)
+ return NULL;
+
+ write_seqlock(&cdirs->seq);
+ list_for_each_entry(lb, &cdirs->lease_breaks, head) {
+ if (!memcmp(lb->lease_key, lease_key, 16)) {
+ atomic_inc(&lb->recvs);
+ write_sequnlock(&cdirs->seq);
+
+ return lb;
+ }
+ }
+ write_sequnlock(&cdirs->seq);
+
+ return NULL;
+}
+
+static void recv_lease_break(struct cached_dirs *cdirs, u8 *lease_key)
+{
+ struct cached_lease_break *lb;
+
+ if (!cdirs || !lease_key)
+ return;
+
+ if (check_lease_break(cdirs, lease_key))
+ return;
+
+ lb = kzalloc(sizeof(*lb), GFP_ATOMIC);
+ if (!lb)
+ return;
+
+ lb->lease_key = kmemdup(lease_key, 16, GFP_ATOMIC);
+ if (!lb->lease_key) {
+ kfree(lb);
+ return;
+ }
+
+ INIT_LIST_HEAD(&lb->head);
+ atomic_set(&lb->recvs, 1);
+ atomic_set(&lb->acks, 0);
+ lb->time = jiffies;
+
+ write_seqlock(&cdirs->seq);
+ list_add_tail(&lb->head, &cdirs->lease_breaks);
+ write_sequnlock(&cdirs->seq);
+
+ pr_err("received leasebreak: %*ph\n", 16, lease_key);
+}
+
+static void ack_lease_break(struct cached_lease_break *lb)
+{
+ if (!lb)
+ return;
+
+ atomic_inc(&lb->acks);
+}
+
+static bool drop_lease_break(struct cached_lease_break *lb, bool teardown)
+{
+ if (!lb)
+ return true;
+
+ if (teardown || (atomic_read(&lb->acks) > atomic_read(&lb->recvs)) ||
+ time_is_before_jiffies(lb->time + dir_cache_timeout * HZ)) {
+ list_del(&lb->head);
+ kfree(lb->lease_key);
+ kfree(lb);
+ return true;
+ }
+
+ return false;
+}
+
+static void cleanup_lease_breaks(struct cached_dirs *cdirs, bool teardown)
+{
+ struct cached_lease_break *lb, *q;
+
+ write_seqlock(&cdirs->seq);
+ list_for_each_entry_safe(lb, q, &cdirs->lease_breaks, head)
+ drop_lease_break(lb, teardown);
+ write_sequnlock(&cdirs->seq);
+}
+
+static bool cdir_match_key(struct cached_dir *cdir, const void *key, int mode);
+
+static void __dump_cdir(struct cached_dir *cdir, const char *caller)
+{
+//#if 0
+// pr_err("%s: cdir=%p, path='%s'\n", caller, cdir, cdir->path);
+//#else
+// pr_err("%s: cdir=%p:\n", caller, cdir);
+// pr_err("\t\tpath=%s\n", cdir->path);
+// pr_err("\t\tdentry=%pd\n", cdir->dentry);
+// if (cdir->lease_key)
+// pr_err("\t\tleasekey=%*ph\n", 16, cdir->lease_key);
+// else
+// pr_err("\t\tinvalid!\n");
+// pr_err("\t\texpired=%s\n", str_yes_no(cdir_is_expired(cdir)));
+// pr_err("\t\tuses=%u\n", kref_read(&cdir->ref));
+// pr_err("\t\tpfid=0x%llx\n", cdir->pfid);
+// pr_err("\t\tvfid=0x%llx\n", cdir->vfid);
+//#endif
+// pr_err("\t\t--------\n");
+}
+
+static void dump_cdir(struct cached_dir *cdir, const char *caller)
+{
+ struct cached_dir copy = {};
+ int seq = 0;
+
+ if (!cdir)
+ return;
+
+ do {
+ read_seqbegin_or_lock(&cdir->seq, &seq);
+
+ copy = *cdir;
+ __dump_cdir(cdir, caller);
+ } while (need_seqretry(&cdir->seq, seq));
+
+ done_seqretry(&cdir->seq, seq);
+}
/*
* Utils
*/
-/* XXX: ok to do this? cdir::path is for reference/search purposes only afterall */
-static inline const char *root_path(const char *path)
+/* Used for cdir::path. Ok to do this because it's for reference/search purposes only. */
+static inline const char *cdir_root_path(const char *path)
{
if (!*path)
return "\\";
@@ -38,7 +159,7 @@ static inline const char *root_path(const char *path)
return path;
}
-static struct dentry *path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
+static struct dentry *cdir_path_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
struct dentry *dentry = dget(cifs_sb->root);
const char *p, *s;
@@ -91,302 +212,470 @@ static struct dentry *path_to_dentry(struct cifs_sb_info *cifs_sb, const char *p
return dentry;
}
-static void cdir_entry_free_all(struct cached_dir_entries *cde)
+/*
+ * This function frees the whole @cdir, but not @cdir itself -- that's up to callers.
+ *
+ * Remote close and dput() must have been handled by callers, e.g. save cdir::{p,v}fid and/or
+ * cdir::dentry BEFORE calling this function and act accordingly.
+ */
+static void cdir_free(struct cached_dir *cdir)
{
struct cached_dir_entry *de, *q;
- //mutex_lock(&cde->lock);
- list_for_each_entry_safe(de, q, &cde->list, head) {
+ kfree_const(cdir->path);
+ kfree(cdir->lease_key);
+ kfree(cdir->info);
+
+ WRITE_ONCE(cdir->path, NULL);
+ WRITE_ONCE(cdir->lease_key, NULL);
+ WRITE_ONCE(cdir->info, NULL);
+ WRITE_ONCE(cdir->dentry, NULL);
+
+ list_for_each_entry_safe(de, q, &cdir->entries.list, head) {
list_del(&de->head);
kfree(de->name);
+ WRITE_ONCE(de->name, NULL);
kfree(de);
}
- //mutex_unlock(&cde->lock);
- cde->ctx = ERR_PTR(-ENOENT);
+ WRITE_ONCE(cdir->entries.ctx, NULL);
+ WRITE_ONCE(cdir->entries.valid, false);
}
-/*
- * Internal ops.
- */
+static inline bool cdir_expired(struct cached_dir *cdir, void *arg)
+{
+ unsigned long time, t = READ_ONCE(cdir->time);
+
+ if (!t)
+ return true;
+
+ time = (unsigned long)arg;
+ if (!time)
+ return time_is_before_jiffies(t + dir_cache_timeout * HZ);
+
+ return time_after(t, time);
+}
+
+static inline bool cdir_is_expired(struct cached_dir *cdir, void *arg)
+{
+ return cdir_expired(cdir, arg);
+}
+
+static inline bool cdir_has_lease(struct cached_dir *cdir, void __maybe_unused *arg)
+{
+ return (!!READ_ONCE(cdir->lease_key));
+}
+
+static inline bool cdir_is_open(struct cached_dir *cdir, void __maybe_unused *arg)
+{
+ return (READ_ONCE(cdir->pfid) != COMPOUND_FID && READ_ONCE(cdir->vfid) != COMPOUND_FID);
+}
+
+static inline bool cdir_is_valid(struct cached_dir *cdir, void *arg)
+{
+ return (cdir_is_open(cdir, NULL) && cdir_has_lease(cdir, NULL) && !cdir_is_expired(cdir, arg));
+}
+
+static inline bool cdir_in_use(struct cached_dir *cdir, void __maybe_unused *arg)
+{
+ return (kref_read(&cdir->ref) > 1);
+}
+
+static bool cdir_got_lease_break(struct cached_dir *cdir, void *arg)
+{
+ struct cached_lease_break *lb;
+ struct cached_dirs *cdirs = (struct cached_dirs *)arg;
+
+ if (!cdir || !cdirs)
+ return true;
+
+ if (!cdir_has_lease(cdir, NULL))
+ return true;
+
+ lb = check_lease_break(cdirs, cdir->lease_key);
+ if (!lb)
+ return false;
+
+ ack_lease_break(lb);
+ return true;
+}
+
+static inline bool cdir_check(struct cached_dir *cdir, void *arg,
+ bool (*fn)(struct cached_dir *cdir, void *arg))
+{
+ bool ret;
+ int seq = 0;
+
+ if (!cdir || IS_ERR(cdir))
+ return false;
+
+ do {
+ read_seqbegin_or_lock(&cdir->seq, &seq);
+
+ ret = fn(cdir, arg);
+ } while (need_seqretry(&cdir->seq, seq));
+ done_seqretry(&cdir->seq, seq);
+
+ return ret;
+}
+
+#define cdir_check_expired(cdir, arg) cdir_check(cdir, arg, cdir_is_expired)
+#define cdir_check_lease(cdir) cdir_check(cdir, NULL, cdir_has_lease)
+#define cdir_check_valid(cdir) cdir_check(cdir, NULL, cdir_is_valid)
+#define cdir_check_use(cdir) cdir_check(cdir, NULL, cdir_in_use)
+#define cdir_check_lease_break(cdir, arg) cdir_check(cdir, arg, cdir_got_lease_break)
/*
- * This function frees the whole cdir.
+ * Mark a cdir as invalid (i.e. no lease anymore).
*
- * cdir_remote_close() and dput() must have been handled by callers, e.g. save cdir::fid and/or
- * cdir::dentry BEFORE calling this function and act accordingly.
+ * Since this can be called from several different contexts, don't free/close/put anything here.
+ *
+ * Return: true if can be cleaned up now, false otherwise.
*/
-static void cdir_free(struct cached_dir *cdir)
+static inline bool cdir_invalidate_unlocked(struct cached_dir *cdir)
{
- if (!cdir)
- return;
+ bool ret = true;
+
+ if (!cdir_has_lease(cdir, NULL)) {
+ if (!cdir_is_expired(cdir, NULL))
+ return false;
+ return true;
+ }
+
+ /*
+	 * This gives a grace period on explicit invalidation calls, so that cdirs still in
+	 * use survive at least until the next cleanup run.
+ */
+ if (cdir_in_use(cdir, NULL)) {
+ pr_err("%s: invalidating cdir=%p in use (%u), key=%*ph\n", __func__, cdir,
+ kref_read(&cdir->ref), 16, cdir->lease_key);
+ WRITE_ONCE(cdir->time, jiffies + dir_cache_timeout * HZ);
+ ret = false;
+ } else {
+ pr_err("%s: invalidating unused cdir=%p, key=%*ph\n", __func__, cdir, 16, cdir->lease_key);
+ WRITE_ONCE(cdir->time, 0);
+ }
- kfree_const(cdir->path);
kfree(cdir->lease_key);
- kfree(cdir->info);
+ WRITE_ONCE(cdir->lease_key, NULL);
- cdir_entry_free_all(&cdir->dir_entries);
+ /* catch unexpected kref_get/kref_put calls, which shouldn't happen for an invalid cdir */
+ //refcount_set(&cdir->ref.refcount, 0);
+
+ return ret;
}
-static bool cdir_expired_unlocked(struct cached_dir *cdir, unsigned long t)
+static inline bool cdir_invalidate(struct cached_dir *cdir)
{
- if (!t)
- return time_is_before_jiffies(cdir->time + dir_cache_timeout * HZ);
+ bool ret;
- return time_before(cdir->time, t);
+ write_seqlock(&cdir->seq);
+ ret = cdir_invalidate_unlocked(cdir);
+ write_sequnlock(&cdir->seq);
+
+ return ret;
}
-static bool cdir_is_valid_unlocked(struct cached_dir *cdir)
+static inline void cdir_invalidate_put(struct kref *kref)
{
- return !cdir_expired_unlocked(cdir, 0);
+ struct cached_dir *cdir = container_of(kref, struct cached_dir, ref);
+
+ cdir_invalidate_unlocked(cdir);
+ WRITE_ONCE(cdir->time, 0);
}
-static bool cdir_is_valid(struct cached_dir *cdir, unsigned long t)
+static inline bool cdir_put(struct cached_dir *cdir)
{
- unsigned int seq = read_seqbegin(&cdir->seq);
- bool valid;
+ bool ret = true;
- do {
- valid = cdir_expired_unlocked(cdir, t);
- } while (read_seqretry(&cdir->seq, seq));
+ if (!cdir)
+ return true;
- return valid;
+ write_seqlock(&cdir->seq);
+ ret = kref_put(&cdir->ref, cdir_invalidate_put);
+ write_sequnlock(&cdir->seq);
+
+ return ret;
}
-static bool cdir_match_unlocked(struct cached_dir *cdir, const void *key, int mode)
+static bool cdir_match_key(struct cached_dir *cdir, const void *key, int mode)
{
+ if (!cdir || !key)
+ return false;
+
switch (mode) {
case CDIR_FIND_PATH:
- return (cdir->path && !strcmp(cdir->path, key));
+ {
+ const char *path = READ_ONCE(cdir->path);
+
+ return (path && !strcmp(path, key));
+ }
case CDIR_FIND_DENTRY:
- return (cdir->dentry && cdir->dentry == key);
+ {
+ struct dentry *dentry = READ_ONCE(cdir->dentry);
+
+ return (dentry && dentry == key);
+ }
case CDIR_FIND_LEASEKEY:
- return (cdir->lease_key && !memcmp(cdir->lease_key, key, 16));
- case CDIR_FIND_CDE:
- return (cdir->dir_entries.ctx == key);
+ {
+ u8 *lease_key = READ_ONCE(cdir->lease_key);
+
+ return (lease_key && !memcmp(lease_key, key, 16));
+ }
+ case CDIR_FIND_PTR:
+ return (cdir == key);
}
return false;
}
-static bool cdir_match(struct cached_dir *cdir, const void *key, int mode)
+static int cdir_match_check(struct cached_dir *cdir, const void *key, int mode)
{
- unsigned int seq = read_seqbegin(&cdir->seq);
- bool valid = false;
+ int ret = -ENOENT, seq = 0;
- do {
- valid = (cdir_match_unlocked(cdir, key, mode) &&
- cdir_is_valid_unlocked(cdir));
- } while (read_seqretry(&cdir->seq, seq));
+ read_seqbegin_or_lock(&cdir->seq, &seq);
+
+ if (cdir_match_key(cdir, key, mode))
+ ret = 0;
+
+ if (need_seqretry(&cdir->seq, seq))
+ ret = -ECHILD;
- return valid;
+ done_seqretry(&cdir->seq, seq);
+
+ return ret;
}
/*
- * Mark a cdir as expired (i.e. invalid).
+ * Search for a matching cdir by @key (as interpreted by @mode) in @cdirs::list.
+ *
+ * Try a fast lookup first; if no entry is found, retry with a slow search.
+ *
+ * Fast: seqcount protected iteration and matching.
+ * Slow: read exclusive seqlock protected iteration and matching.
*
- * Since this can be called from several different contexts, we don't free/close/put anything else
- * here.
+ * Return: On success, valid matching cdir is returned, ERR_PTR(-errno) otherwise.
*/
-static inline void cdir_invalidate_unlocked(struct cached_dir *cdir)
+static struct cached_dir *cdir_find_entry(struct cached_dirs *cdirs, const void *key, int mode)
{
- if (!cdir)
- return;
+ struct cached_dir *cdir;
+ long ret;
+ int seq = 0;
- cdir->time = 0;
-}
+ if (!cdirs || !key || !mode)
+ return ERR_PTR(-EOPNOTSUPP);
-static struct cached_dir *cdir_alloc(void)
-{
- struct cached_dir *cdir = kzalloc(sizeof(*cdir), GFP_KERNEL);
+ do {
+ read_seqbegin_or_lock(&cdirs->seq, &seq);
- if (!cdir)
- return NULL;
+ ret = -ENOENT;
+ list_for_each_entry(cdir, &cdirs->list, head) {
+ ret = cdir_match_check(cdir, key, mode);
+ if (ret != -ENOENT)
+ break;
+ }
+ } while (need_seqretry(&cdirs->seq, seq) && ret != -ECHILD);
- /* This is our internal ref, cdir stays on the list until it expires/leasebreak/drop. */
- INIT_LIST_HEAD(&cdir->head);
- seqlock_init(&cdir->seq);
+#if 0
+ if (ret == -ECHILD)
+ goto out;
- INIT_LIST_HEAD(&cdir->dir_entries.list);
- mutex_init(&cdir->dir_entries.lock);
- //cdir->dir_entries.valid = false;
+ ret = 0;
+#endif
+ done_seqretry(&cdirs->seq, seq);
- return cdir;
-}
+ if (!ret) {
+ if (cdir_check_valid(cdir))
+ return cdir;
-/*
- * Management ops for tcon::cdirs.
- */
+ ret = -EINVAL;
+ }
-/*
- * Add an entry with the provided data to cdirs::entries list.
- */
-static void cdirs_add_entry(struct cached_dirs *cdirs, struct cached_dir *cdir)
-{
- write_seqlock(&cdirs->seq);
- list_add(&cdir->head, &cdirs->entries);
- atomic_inc(&cdirs->count);
- write_sequnlock(&cdirs->seq);
+ return ERR_PTR(ret);
}
-static struct cached_dir *cdirs_init_entry(struct cached_dirs *cdirs, const char *path,
- struct dentry *dentry, struct cifs_fid *fid,
- struct smb2_file_all_info *info)
+static struct cached_dir *cdir_init_entry(const char *path, struct dentry *dentry,
+ struct cifs_fid *fid, struct smb2_file_all_info *info)
{
- struct cached_dir *cdir = cdir_alloc();
+ struct cached_dir *cdir;
+
+ /* We accept a NULL @dentry. */
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
+ cdir = kzalloc(sizeof(*cdir), GFP_KERNEL);
if (!cdir)
return ERR_PTR(-ENOMEM);
- if (IS_ERR(dentry))
- return ERR_CAST(dentry);
+ INIT_LIST_HEAD(&cdir->head);
+ seqlock_init(&cdir->seq);
+ kref_init(&cdir->ref);
+
+ mutex_init(&cdir->entries.lock);
+ INIT_LIST_HEAD(&cdir->entries.list);
- cdir->dentry = dentry;
cdir->path = kstrdup_const(path, GFP_KERNEL);
if (!cdir->path) {
kfree(cdir);
return ERR_PTR(-ENOMEM);
}
- cdir->pfid = fid->persistent_fid;
- cdir->vfid = fid->volatile_fid;
cdir->lease_key = kmemdup(fid->lease_key, 16, GFP_KERNEL);
if (!cdir->lease_key) {
kfree(cdir->path);
kfree(cdir);
return ERR_PTR(-ENOMEM);
}
- cdir->info = info;
+ if (info) {
+ cdir->info = kmemdup(info, sizeof(*info), GFP_KERNEL);
+ if (!cdir->info) {
+ kfree(cdir->lease_key);
+ kfree(cdir->path);
+ kfree(cdir);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ cdir->dentry = dentry;
+ cdir->pfid = fid->persistent_fid;
+ cdir->vfid = fid->volatile_fid;
cdir->time = jiffies;
return cdir;
}
-struct need_close {
- struct cached_dir *cdir;
- struct dentry *dentry;
- unsigned long long pfid;
- unsigned long long vfid;
-};
-
-static void cdir_del_entry(struct cached_dir *cdir, struct need_close *close)
+static __maybe_unused void cdir_del_entry(struct cached_dir *cdir, struct cached_dir *copy)
{
- cdir_invalidate_unlocked(cdir);
-
- close->cdir = cdir;
- close->dentry = NULL;
- close->pfid = COMPOUND_FID;
- close->vfid = COMPOUND_FID;
-
list_del(&cdir->head);
- swap(cdir->dentry, close->dentry);
+ if (copy) {
+ copy->dentry = READ_ONCE(cdir->dentry);
+ copy->pfid = READ_ONCE(cdir->pfid);
+ copy->vfid = READ_ONCE(cdir->vfid);
+ }
- /* mark cdir as "closed" */
- swap(cdir->pfid, close->pfid);
- swap(cdir->vfid, close->vfid);
+ cdir_free(cdir);
}
-/*
- * Search for a matching, and valid, cdir by @key (specified by @mode) in @list.
- *
- * Caller must hold any locks necessary, and do refcount accordingly.
- *
- * Return:
- * - valid cdir if found
- * - NULL in any other case
- */
-static struct cached_dir *cdirs_find_fast(struct cached_dirs *cdirs, const void *key, int mode)
+static void cdir_find_drop(struct cached_dirs *cdirs, const void *key, int mode)
{
- struct cached_dir *cdir;
+ struct cached_dir *cdir; //, copy = {};
- if (!key)
- return NULL;
+	// allow us to drop this cdir without cleanup intervention
+ //mod_delayed_work(cfid_put_wq, &cdirs->cleanup_work, 10 * HZ);
- list_for_each_entry(cdir, &cdirs->entries, head)
- if (cdir_match_unlocked(cdir, key, mode) && cdir_is_valid_unlocked(cdir))
- return cdir;
+ cdir = cdir_find_entry(cdirs, key, mode);
+ if (IS_ERR(cdir))
+ return;
- return NULL;
-}
+ if (cdir_check_lease_break(cdir, cdirs))
+ return;
-/*
- * Try a fast lookup first, if can't find an entry, fallback to slow search.
- *
- * Fast: seqcount protected only, check valid.
- * Slow: exclusive seqlock protected, retry on -ECHILD.
- */
-static struct cached_dir *cdirs_find_entry(struct cached_dirs *cdirs, const void *key, int mode)
-{
- struct cached_dir *cdir;
- unsigned int seq;
- bool slow = false;
- int retries = 3;
+ /*
+ * Put our ref.
+ * If we can't properly drop it yet, clean as much as we can and offload to cleanup.
+ */
+ if (!cdir_put(cdir)) {
+ struct dentry *d;
- seq = read_seqbegin(&cdirs->seq);
-find_again:
- cdir = cdirs_find_fast(cdirs, key, mode);
+ write_seqlock(&cdir->seq);
+ d = cdir->dentry;
+ cdir->dentry = NULL;
+ write_sequnlock(&cdir->seq);
- if (!slow && !read_seqretry(&cdirs->seq, seq))
- return cdir;
+ if (d)
+ dput(d);
- /* if the list was modified, but didn't affect our cdir, ok to return it */
- if (cdir && cdir_match(cdir, key, mode)) {
- if (slow)
- read_sequnlock_excl(&cdirs->seq);
- return cdir;
+ if (!cdir_invalidate(cdir)) {
+ mod_delayed_work(cfid_put_wq, &cdirs->cleanup_work, dir_cache_timeout * HZ);
+ return;
+ }
}
- /* slow path */
- if (!slow) {
- slow = true;
- read_seqlock_excl(&cdirs->seq);
- if (retries-- > 0)
- goto find_again;
- }
+#if 0
+ //pr_err("%s: dropping now cdir=%p\n", __func__, cdir);
+ write_seqlock(&cdir->seq);
+ // leave it on the list??
+ //cdir_del_entry(cdir, &copy);
+ copy.dentry = READ_ONCE(cdir->dentry);
+ cdir_free(cdir);
+ write_sequnlock(&cdir->seq);
- read_sequnlock_excl(&cdirs->seq);
+ // leave it on the list?
+ //kfree(cdir);
- return NULL;
+ if (copy.dentry)
+ dput(copy.dentry);
+#endif
+
+ mod_delayed_work(cfid_put_wq, &cdirs->cleanup_work, 0);
}
-static void cdir_cleanup_sync(struct cached_dirs *cdirs, bool all)
+static bool cdir_cleanup_sync(struct cached_dirs *cdirs, bool teardown)
{
- struct cached_dir *cdir, *q;
- struct need_close close[16] = {};
+ struct cached_dir *cdir, *q, *copy;
int i = 0;
-#if 0
- need_close = kcalloc(16, sizeof(*need_close), GFP_KERNEL);
- if (WARN_ON(!need_close))
- return;
-#endif
+
+ copy = kcalloc(16, sizeof(*cdir), GFP_KERNEL);
+ if (WARN_ON(!copy))
+ return list_empty(&cdirs->list);
+
+ cleanup_lease_breaks(cdirs, teardown);
+
write_seqlock(&cdirs->seq);
- if (all)
- atomic_set(&cdirs->count, INT_MAX);
+ list_for_each_entry_safe(cdir, q, &cdirs->list, head) {
+ if (!teardown && cdir_check_valid(cdir)) {
+ dump_cdir(cdir, "skipping valid cleanup");
+ continue;
+ }
- list_for_each_entry_safe(cdir, q, &cdirs->entries, head) {
- if (!cdir_is_valid_unlocked(cdir) || all) {
- write_seqlock(&cdir->seq);
- cdir_del_entry(cdir, &close[i++]);
+ write_seqlock(&cdir->seq);
+ if (cdir_invalidate_unlocked(cdir) || teardown) {
+ if (teardown)
+ __dump_cdir(cdir, "tearing down");
+ else
+ __dump_cdir(cdir, "cleaning up");
+
+ //cdir_del_entry(cdir, &copy[i++]);
+ list_del(&cdir->head);
+ copy[i].dentry = READ_ONCE(cdir->dentry);
+ copy[i].pfid = READ_ONCE(cdir->pfid);
+ copy[i].vfid = READ_ONCE(cdir->vfid);
+ if (cdir_is_open(&copy[i], NULL))
+ pr_err("%s: remote closing cdir=%p path='%s', dentry='%pd'\n",
+ cdirs->tcon->tree_name, cdir, copy[i].path, copy[i].dentry);
+ i++;
cdir_free(cdir);
+ atomic_dec(&cdirs->count);
write_sequnlock(&cdir->seq);
+
kfree(cdir);
- atomic_dec(&cdirs->count);
+ continue;
}
+ write_sequnlock(&cdir->seq);
+
+ dump_cdir(cdir, "skipping invalid cleanup");
}
write_sequnlock(&cdirs->seq);
- i = 0;
- while (i < 16) {
- if (close[i].dentry)
- dput(close[i].dentry);
+ while (--i >= 0) {
+ if (copy[i].dentry) {
+ //pr_err("%s: cleaning up[%d]: put dentry='%pd'\n", __func__, i, copy[i].dentry);
+ dput(copy[i].dentry);
+ }
- cdir_remote_close(cdirs, close[i].pfid, close[i].vfid);
- i++;
+ if (cdir_is_open(&copy[i], NULL)) {
+ //pr_err("%s: cleaning up[%d]: remote close=0x%llx\n", __func__, i, copy[i].pfid);
+ SMB2_close(get_xid(), cdirs->tcon, copy[i].pfid, copy[i].vfid);
+ atomic_dec(&cdirs->tcon->num_remote_opens);
+ }
}
+ kfree(copy);
+
+ return list_empty(&cdirs->list);
}
static void cdir_cleanup(struct work_struct *work)
@@ -397,22 +686,11 @@ static void cdir_cleanup(struct work_struct *work)
queue_delayed_work(cfid_put_wq, &cdirs->cleanup_work, dir_cache_timeout * HZ);
}
-/*
- * Remote ops.
- */
-static void cdir_remote_close(struct cached_dirs *cdirs, unsigned long long pfid, unsigned long long vfid)
-{
- if (pfid != COMPOUND_FID) {
- SMB2_close(get_xid(), cdirs->tcon, pfid, vfid);
- atomic_dec(&cdirs->tcon->num_remote_opens);
- }
-}
-
-static int cdir_remote_open(struct cifs_tcon *tcon, const char *path, struct cifs_sb_info *cifs_sb,
- struct cifs_fid *fid, struct smb2_file_all_info *info)
+static int cdir_remote_open(unsigned int xid, struct cifs_tcon *tcon, const char *path,
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *fid,
+ struct smb2_file_all_info *info, bool *info_ok)
{
struct smb2_query_info_rsp *qi_rsp = NULL;
- struct smb2_file_all_info _info = {};
struct TCP_Server_Info *server;
struct cifs_open_parms oparms;
struct smb2_create_rsp *o_rsp = NULL;
@@ -421,8 +699,6 @@ static int cdir_remote_open(struct cifs_tcon *tcon, const char *path, struct cif
struct kvec rsp_iov[2];
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct kvec qi_iov[1];
- unsigned int xid = get_xid();
- bool got_info = false;
int resp_buftype[2];
__le16 *utf16_path = NULL;
int rc, flags = 0;
@@ -431,6 +707,11 @@ static int cdir_remote_open(struct cifs_tcon *tcon, const char *path, struct cif
replay_again:
memset(fid, 0, sizeof(*fid));
+ fid->persistent_fid = COMPOUND_FID;
+ fid->volatile_fid = COMPOUND_FID;
+
+ memset(info, 0, sizeof(*info));
+ *info_ok = false;
flags = 0;
oplock = SMB2_OPLOCK_LEVEL_II;
@@ -526,8 +807,8 @@ replay_again:
if (!smb2_validate_and_copy_iov(le16_to_cpu(qi_rsp->OutputBufferOffset),
sizeof(struct smb2_file_all_info),
&rsp_iov[1], sizeof(struct smb2_file_all_info),
- (char *)&_info))
- got_info = true;
+ (char *)info))
+ *info_ok = true;
rc = 0;
oshr_free:
@@ -540,112 +821,169 @@ oshr_free:
if (rc && is_replayable_error(rc) && smb2_should_replay(tcon, &retries, &sleep))
goto replay_again;
- if (!rc) {
+ if (!rc)
atomic_inc(&tcon->num_remote_opens);
- if (got_info)
- memcpy(info, &_info, sizeof(_info));
- } else if (rc == -EINVAL) {
- rc = SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
- }
return rc;
}
-/* Do a remote open + query info and add a cached_dir entry on success. */
static struct cached_dir *cdir_open(struct cached_dirs *cdirs, const char *path,
- struct cifs_sb_info *cifs_sb)
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
{
struct cached_dir *cdir = NULL;
- struct smb2_file_all_info *info, dummy = {};
+ struct smb2_file_all_info info = {};
struct cifs_fid fid = {};
- unsigned int seq;
+ bool info_ok;
int ret;
- seq = read_seqbegin(&cdirs->seq);
- do {
- ret = atomic_read(&cdirs->count) >= 16; /* MAX_CACHED_FIDS */
- } while (read_seqretry(&cdirs->seq, seq));
+ ret = cdir_remote_open(xid, cdirs->tcon, path, cifs_sb, &fid, &info, &info_ok);
+ cdir = ERR_PTR(ret);
+ if (!ret) {
+ struct dentry *dentry = NULL;
- if (ret)
- return ERR_PTR(-EMFILE);
+ /*
+ * If we're mounting (i.e. cifs_sb->root == NULL), we cache it with a NULL dentry
+ * for now and set it later in cifs_smb3_do_mount().
+ */
+ if (cifs_sb->root)
+ dentry = cdir_path_dentry(cifs_sb, path);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
+ cdir = cdir_init_entry(cdir_root_path(path), dentry, &fid, info_ok ? &info : NULL);
+ if (!IS_ERR(cdir))
+ return cdir;
- ret = cdir_remote_open(cdirs->tcon, path, cifs_sb, &fid, info);
- if (!ret) {
- if (memcmp(info, &dummy, sizeof(dummy))) {
- kfree(info);
- info = NULL;
- }
+ if (dentry)
+ dput(dentry);
+ }
- cdir = cdirs_init_entry(cdirs, root_path(path), path_to_dentry(cifs_sb, path),
- &fid, info);
- if (!IS_ERR(cdir))
- cdirs_add_entry(cdirs, cdir);
- else {
- kfree(info);
- }
- } else {
- kfree(info);
+ if (fid.persistent_fid != COMPOUND_FID && fid.volatile_fid != COMPOUND_FID) {
+ /* XXX: Ignore return value for now. */
+ pr_err("%s: remote closing uncached dir path='%s', leasekey=%*ph\n",
+ __func__, path, 16, fid.lease_key);
+ (void) SMB2_close(xid, cdirs->tcon, fid.persistent_fid, fid.volatile_fid);
}
return cdir;
}
-/*
- * Public ops (used by callers).
- */
-
struct cached_dir *cached_dir_get_path(struct cached_dirs *cdirs, const char *path)
{
- return cdirs_find_entry(cdirs, root_path(path), CDIR_FIND_PATH);
-}
+ struct cached_dir *cdir = cdir_find_entry(cdirs, cdir_root_path(path), CDIR_FIND_PATH);
-struct cached_dir *cached_dir_get_dentry(struct cached_dirs *cdirs, const struct dentry *dentry)
-{
- return cdirs_find_entry(cdirs, dentry, CDIR_FIND_DENTRY);
-}
+ if (IS_ERR(cdir))
+ return NULL;
-struct cached_dir *cached_dir_get_leasekey(struct cached_dirs *cdirs, const u8 *lease_key)
-{
- return cdirs_find_entry(cdirs, lease_key, CDIR_FIND_LEASEKEY);
+ if (cdir_check_lease_break(cdir, cdirs)) {
+ cdir_invalidate(cdir);
+ return NULL;
+ }
+
+ if (kref_get_unless_zero(&cdir->ref)) {
+ if (cdir_check_valid(cdir))
+ return cdir;
+
+ cdir_put(cdir);
+ }
+
+ return NULL;
}
-struct cached_dir *cached_dir_get_open(struct cached_dirs *cdirs, const char *path,
- struct cifs_sb_info *cifs_sb)
+struct cached_dir *cached_dir_open(struct cached_dirs *cdirs, const char *path,
+ struct cifs_sb_info *cifs_sb)
{
- struct cached_dir *cdir;
+ struct cached_dir *cdir, *check = NULL;
+ unsigned int xid;
- if (!cifs_sb->root || !cdirs)
+ if (!cdirs) {
+ pr_err("%s: no cdirs\n", __func__);
return NULL;
+ }
- cdir = cached_dir_get_path(cdirs, path);
+ /* Try to find a cached entry by @path. */
+ cdir = cached_dir_get_path(cdirs, cdir_root_path(path));
if (cdir)
return cdir;
- cdir = cdir_open(cdirs, path, cifs_sb);
- if (IS_ERR(cdir))
+ /* Check if we can actually cache a new entry. */
+ if (atomic_read(&cdirs->count) >= 16) {
+ pr_err("%s: too many cdirs %u\n", __func__, atomic_read(&cdirs->count));
+ return NULL;
+ }
+
+ /* No cached entry yet, do remote open + query info for @path. */
+ xid = get_xid();
+ cdir = cdir_open(cdirs, path, cifs_sb, xid);
+
+ pr_err("%s: remote open path=%s, cdir=%p\n", cdirs->tcon->tree_name, cdir_root_path(path), cdir);
+
+ /*
+ * If the remote open failed, there's nothing we can do, just return the error.
+ * (cdir_open() doesn't return NULL)
+ */
+ if (IS_ERR(cdir)) {
+ cifs_dbg(VFS, "%s: remote open failed, err=%ld\n", __func__, PTR_ERR(cdir));
+ return NULL;
+ }
+
+ /*
+ * Cache a new entry only if we're sure there are no duplicates.
+ *
+ * We want to know if a concurrent thread successfully cached the same path as us, so
+ * check if there was a lease break for us.
+ */
+ if (cdir_check_lease_break(cdir, cdirs)) {
+ dput(cdir->dentry);
+ cdir_free(cdir);
+ kfree(cdir);
+ cdir = NULL;
+ }
+
+ /*
+	 * Regardless of whether there was a lease break for our @path, we still check
+	 * (preventively) that the same path wasn't added before us.
+ */
+ check = cached_dir_get_path(cdirs, cdir_root_path(path));
+ if (check && !IS_ERR(check)) {
+ pr_err("%s: race in open path=%s\n", __func__, cdir_root_path(path));
+
+ // cdir != NULL means no leasebreak for this path, but also means some other bug?
+ if (WARN_ON(cdir)) {
+ dput(cdir->dentry);
+ cdir_free(cdir);
+ kfree(cdir);
+ }
+
+ return check;
+ }
+
+ // got a leasebreak but nobody cached it yet? should retry...
+ if (!cdir)
return NULL;
+ /* Else (check == NULL && cdir != NULL), we're fine to cache our cdir now */
+ pr_err("%s: caching new cdir=%p, path='%s', dentry=%pd, key=%*ph\n", cdirs->tcon->tree_name, cdir, cdir->path, cdir->dentry, 16, cdir->lease_key);
+ write_seqlock(&cdirs->seq);
+ kref_get(&cdir->ref);
+ list_add_tail(&cdir->head, &cdirs->list);
+ atomic_inc(&cdirs->count);
+ write_sequnlock(&cdirs->seq);
+
return cdir;
}
-void cached_dir_put(struct cached_dirs *cdirs, struct cached_dir *cdir)
+void cached_dir_close(struct cached_dirs *cdirs, struct cached_dir *cdir)
{
- unsigned int seq;
-
if (!cdir)
return;
- seq = read_seqbegin(&cdir->seq);
- if (cdir_is_valid_unlocked(cdir))
- if (!read_seqretry(&cdir->seq, seq))
- return;
+ //dump_cdir(cdir, "closing");
- /* similarly, this must also not happen */
- if (WARN_ON(!cdir->path || !cdir->dentry || !cdir->lease_key)) {
+ if (!cdir_check_valid(cdir)) {
+ pr_err("%s: closing cdir no longer valid, should reopen? path='%s', dentry='%pd', leasekey=%*ph\n",
+ __func__, cdir->path, cdir->dentry, 16, cdir->lease_key);
+ }
+
+ if (cdir_put(cdir)) {
mod_delayed_work(cfid_put_wq, &cdirs->cleanup_work, 0);
return;
}
@@ -653,52 +991,80 @@ void cached_dir_put(struct cached_dirs *cdirs, struct cached_dir *cdir)
queue_delayed_work(cfid_put_wq, &cdirs->cleanup_work, dir_cache_timeout * HZ);
}
-static void __cdir_drop(struct cached_dirs *cdirs, struct cached_dir *cdir)
+void cached_dir_drop(struct cached_dirs *cdirs, const char *path)
{
- write_seqlock(&cdir->seq);
- cdir_invalidate_unlocked(cdir);
- write_sequnlock(&cdir->seq);
-
- mod_delayed_work(cfid_put_wq, &cdirs->cleanup_work, 0);
+ pr_err("%s: cached dir drop (path=%s)\n", __func__, cdir_root_path(path));
+ cdir_find_drop(cdirs, cdir_root_path(path), CDIR_FIND_PATH);
}
-static void cdir_drop_by(struct cached_dirs *cdirs, const void *key, int mode)
+/* Find a cdir that matches @lease_key and drop it immediately. */
+void cached_dir_lease_break(struct cached_dirs *cdirs, u8 *lease_key)
{
- struct cached_dir *cdir = cdirs_find_entry(cdirs, key, mode);
+ pr_err("%s: cached dir lease break (key=%*ph)\n", __func__, 16, lease_key);
- /* Already gone? */
- if (!cdir)
- return;
-
- __cdir_drop(cdirs, cdir);
+ recv_lease_break(cdirs, lease_key);
+ cdir_find_drop(cdirs, lease_key, CDIR_FIND_LEASEKEY);
}
-void cached_dir_drop(struct cached_dirs *cdirs, struct cached_dir *cdir)
+/* Return: true if cdir::time + `dir_cache_timeout' seconds is < jiffies, false otherwise. */
+bool cached_dir_is_expired(struct cached_dir *cdir)
{
- if (!cdir)
- return;
-
- __cdir_drop(cdirs, cdir);
+ return !cdir_check_valid(cdir);
}
-/* return true if cdir::time + `dir_cache_timeout' seconds is < jiffies */
-bool cached_dir_expired(struct cached_dir *cdir)
+bool cached_dir_check_expired_path(struct cached_dirs *cdirs, const char *path)
{
- return cdir_is_valid(cdir, 0);
-}
+ struct cached_dir *cdir;
-/* return true if cdir::time is older than @time */
-bool cached_dir_is_older(struct cached_dir *cdir, unsigned long time)
-{
- return cdir_is_valid(cdir, time);
+ /* cached_dir_get_path() already checks for valid + non-expired, nothing else to do here */
+ cdir = cached_dir_get_path(cdirs, path);
+ if (cdir) {
+ if (cdir_put(cdir))
+ return true;
+ return false;
+ }
+
+ return true;
}
/*
- * Invalidate or drop a cdir that got a lease break.
+ * Return: true if (cifs inode) @time is older (<) than @cdir::time, false otherwise.
*/
-void cached_dir_lease_break(struct cached_dirs *cdirs, u8 lease_key[16])
+bool cached_dir_check_expired_dentry(struct cached_dirs *cdirs, struct dentry *dentry, unsigned long time)
{
- cdir_drop_by(cdirs, lease_key, CDIR_FIND_LEASEKEY);
+ struct cached_dir *cdir;
+
+ if (!cdirs || !dentry)
+ return true;
+
+ cdir = cdir_find_entry(cdirs, dentry, CDIR_FIND_DENTRY);
+ pr_err("%s: cdir=%p (err=%ld) for dentry='%pd', expired=%s\n", cdirs->tcon->tree_name, cdir, !cdir ? 0 : (IS_ERR(cdir) ? PTR_ERR(cdir) : 0), dentry,
+ (cdir && !IS_ERR(cdir)) ? str_yes_no(cdir_check_expired(cdir, (void *)time)) : "yes");
+ if (!cdir || IS_ERR(cdir))
+ return true;
+
+ if (cdir) {
+ if (kref_get_unless_zero(&cdir->ref)) {
+ bool older = cdir_check_expired(cdir, (void *)time);
+
+ //pr_err("%s: cdir=%p, dentry=%pd, older=%s (cdir=%u, time=%u)\n", __func__,
+ // cdir, dentry, str_yes_no(older), jiffies_to_msecs(jiffies - cdir->time), jiffies_to_msecs(jiffies - time));
+ if (cdir_put(cdir)) {
+ pr_err("%s: (last ref) cdir=%p, dentry=%pd, older=%s (cdir=%u, time=%u)\n", __func__,
+ cdir, dentry, str_yes_no(older), jiffies_to_msecs(jiffies - cdir->time), jiffies_to_msecs(jiffies - time));
+ return true;
+ }
+
+ if (older)
+ pr_err("%s: (ref) cdir=%p, dentry=%pd, older=true (cdir=%u, time=%u)\n", __func__,
+ cdir, dentry, jiffies_to_msecs(jiffies - cdir->time), jiffies_to_msecs(jiffies - time));
+ return older;
+ }
+ }
+
+ pr_err("%s: no cdir for dentry=%pd (%p, positive=%s), time=%lu\n", __func__, dentry, dentry, str_yes_no(d_really_is_positive(dentry)), time);
+
+ return true;
}
/*
@@ -712,27 +1078,20 @@ void cached_dir_init(struct cifs_tcon *tcon)
if (!tcon->cdirs)
return;
- /* grab a ref */
- tcon->tc_count++;
-
/* tcon backref (ugh..., but it's only way we can properly remote close on cleanup) */
tcon->cdirs->tcon = tcon;
+ tcon->tc_count++;
- INIT_LIST_HEAD(&tcon->cdirs->entries);
+ INIT_LIST_HEAD(&tcon->cdirs->list);
+ INIT_LIST_HEAD(&tcon->cdirs->lease_breaks);
INIT_DELAYED_WORK(&tcon->cdirs->cleanup_work, cdir_cleanup);
seqlock_init(&tcon->cdirs->seq);
+ atomic_set(&tcon->cdirs->count, 0);
queue_delayed_work(cfid_put_wq, &tcon->cdirs->cleanup_work, dir_cache_timeout * HZ);
}
-/*
- * Destroy the cached directories management interface for a tcon.
- *
- * This will:
- * - remote close all open cdirs
- * - dput their dentries
- * - free everything
- */
+/* Destroy the cached directories management interface for a tcon. */
void cached_dir_exit(struct cifs_tcon *tcon)
{
struct cached_dirs *cdirs;
@@ -742,21 +1101,29 @@ void cached_dir_exit(struct cifs_tcon *tcon)
return;
cdirs = tcon->cdirs;
+
+ /* Prevent further additions to the list. */
+ flush_delayed_work(&cdirs->cleanup_work);
cancel_delayed_work_sync(&cdirs->cleanup_work);
- cdir_cleanup_sync(cdirs, true);
- done = list_empty(&cdirs->entries);
- WARN(!list_empty(&cdirs->entries), "%s: entries list not empty yet!\n", __func__);
+ atomic_set(&cdirs->count, INT_MAX);
+
+ done = cdir_cleanup_sync(cdirs, true);
+
+ /* Something bad happened, give it a last chance to cleanup. */
+ if (unlikely(!done))
+ done = cdir_cleanup_sync(cdirs, true);
+
+ kfree(cdirs);
+ tcon->cdirs = NULL;
+
+ WARN(!done, "%s: cached directories still in use!\n", tcon->tree_name);
/* put tcon ref for this cdirs (grabbed in cached_dir_init()) */
cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}
-/*
- * Destroy the cached directories management interface (called on umount).
- *
- * This will call cached_dir_exit() for all tcons.
- */
+/* Call cached_dir_exit() for all tcons. */
void cached_dir_destroy(struct cifs_sb_info *cifs_sb)
{
struct rb_root *root = &cifs_sb->tlink_tree;
@@ -765,6 +1132,8 @@ void cached_dir_destroy(struct cifs_sb_info *cifs_sb)
struct tcon_link *tlink;
LIST_HEAD(entry);
+ flush_workqueue(cfid_put_wq);
+
spin_lock(&cifs_sb->tlink_tree_lock);
for (node = rb_first(root); node; node = rb_next(node)) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
@@ -777,28 +1146,59 @@ void cached_dir_destroy(struct cifs_sb_info *cifs_sb)
spin_unlock(&cifs_sb->tlink_tree_lock);
- cached_dir_exit(tcon);
- //cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
+ //cached_dir_exit(tcon);
+
+ cached_dir_cleanup(tcon, true);
+ kfree(tcon->cdirs);
+ tcon->cdirs = NULL;
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
-
- /* Flush any pending work that will drop dentries */
- flush_workqueue(cfid_put_wq);
- drain_workqueue(cfid_put_wq);
}
-struct cached_dir_entries *cached_dir_entries_get(struct cached_dirs *cdirs, struct cached_dir *cdir, struct dir_context *ctx)
+void cached_dir_cleanup(struct cifs_tcon *tcon, bool free)
{
- struct cached_dir_entries *cde = NULL;
- unsigned int seq = read_seqbegin(&cdir->seq);
+ struct cached_lease_break *lb, *q;
+ struct cached_dirs *cdirs;
+ struct cached_dir *cdir;
- do {
- cde = &cdir->dir_entries;
- } while (read_seqretry(&cdir->seq, seq));
+ if (!tcon || !tcon->cdirs)
+ return;
+
+ cdirs = tcon->cdirs;
+
+ if (free) {
+ cancel_delayed_work_sync(&cdirs->cleanup_work);
+ write_seqlock(&cdirs->seq);
+ atomic_set(&cdirs->count, INT_MAX);
+ write_sequnlock(&cdirs->seq);
+ }
+
+ write_seqlock(&cdirs->seq);
+ list_for_each_entry(cdir, &cdirs->list, head) {
+ write_seqlock(&cdir->seq);
+ (void) cdir_invalidate_unlocked(cdir);
+ WRITE_ONCE(cdir->pfid, COMPOUND_FID);
+ WRITE_ONCE(cdir->vfid, COMPOUND_FID);
+ write_sequnlock(&cdir->seq);
+ }
+
+ list_for_each_entry_safe(lb, q, &cdirs->lease_breaks, head) {
+ list_del(&lb->head);
+ kfree(lb->lease_key);
+ kfree(lb);
+ }
+ write_sequnlock(&cdirs->seq);
- return cde;
+ (void) cdir_cleanup_sync(cdirs, true);
+
+ if (!free) {
+ write_seqlock(&cdirs->seq);
+ atomic_set(&cdirs->count, 0);
+ write_sequnlock(&cdirs->seq);
+ }
}
static bool cde_emit(struct cached_dir_entries *cde, struct dir_context *ctx)
@@ -807,24 +1207,22 @@ static bool cde_emit(struct cached_dir_entries *cde, struct dir_context *ctx)
bool rc;
list_for_each_entry(de, &cde->list, head) {
- /*
- * Skip all early entries prior to the current lseek()
- * position.
- */
+ /* Skip all early entries prior to the current lseek() position. */
if (ctx->pos > de->pos)
continue;
/*
- * We recorded the current ->pos value for the dirent
- * when we stored it in the cache.
- * However, this sequence of ->pos values may have holes
- * in it, for example dot-dirs returned from the server
- * are suppressed.
- * Handle this by forcing ctx->pos to be the same as the
- * ->pos of the current dirent we emit from the cache.
- * This means that when we emit these entries from the cache
- * we now emit them with the same ->pos value as in the
- * initial scan.
+ * We recorded the current ->pos value for the dirent when we stored it in the
+ * cache.
+ *
+ * However, this sequence of ->pos values may have holes in it, for example
+ * dot-dirs returned from the server are suppressed.
+ *
+ * Handle this by forcing ctx->pos to be the same as the ->pos of the current
+ * dirent we emit from the cache.
+ *
+ * This means that when we emit these entries from the cache we now emit them with
+ * the same ->pos value as in the initial scan.
*/
ctx->pos = de->pos;
@@ -838,49 +1236,68 @@ static bool cde_emit(struct cached_dir_entries *cde, struct dir_context *ctx)
return true;
}
-int cached_dir_entry_emit(struct cached_dir_entries *cde, struct dir_context *ctx, struct file *file)
+/*
+ * cached_dir_emit_entries - Emit the entries for a cached dir.
+ * @cdir: cached dir to store the entries
+ * @ctx: must match @cdir::entries::ctx
+ * @file: passed on to dir_emit_dots()
+ *
+ * Return: true to keep going (e.g. no cached dir or cached dir entries not filled in yet), false
+ * otherwise (cached dir entries were emitted).
+ */
+bool cached_dir_emit_entries(struct cached_dir *cdir, struct dir_context *ctx, struct file *file)
{
- if (!cde)
- return 0;
+ struct cached_dir_entries *cde;
+
+ if (!cdir)
+ return true;
+ cde = &cdir->entries;
mutex_lock(&cde->lock);
+ if (cde->ctx && IS_ERR(cde->ctx)) {
+ mutex_unlock(&cde->lock);
+ return false;
+ }
+
/*
- * If this was reading from the start of the directory we need to
- * initialize scanning and storing the directory content.
+ * If this was reading from the start of the directory we need to initialize scanning and
+ * storing the directory content.
*/
if (ctx->pos == 0 && !cde->ctx) {
cde->ctx = ctx;
cde->pos = 2;
}
- if (cde->valid) {
- if (!dir_emit_dots(file, ctx)) {
- mutex_unlock(&cde->lock);
- return -ENOENT;
- }
- /*
- * If we already have the entire directory cached then we can just serve the cache.
- */
- cde_emit(cde, ctx);
+ if (!cde->valid) {
mutex_unlock(&cde->lock);
+ return true;
+ }
- return 0;
+ if (!dir_emit_dots(file, ctx)) {
+ mutex_unlock(&cde->lock);
+ return false;
}
+
+ /* If we already have the entire directory cached then we can just serve the cache. */
+ cde_emit(cde, ctx);
mutex_unlock(&cde->lock);
- return -EAGAIN;
+ return false;
}
-void cached_dir_entry_add(struct cached_dir_entries *cde, struct dir_context *ctx,
- const char *name, int len, struct cifs_fattr *fattr)
+void cached_dir_add_entry(struct cached_dir *cdir, struct dir_context *ctx, const char *name,
+ int len, struct cifs_fattr *fattr)
{
+ struct cached_dir_entries *cde;
struct cached_dir_entry *de;
- if (!cde)
+ if (!cdir)
return;
+ cde = &cdir->entries;
+
mutex_lock(&cde->lock);
if (cde->ctx != ctx) {
mutex_unlock(&cde->lock);
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index a0fba26d3da2..8753df164171 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -6,12 +6,12 @@
#include "cifsglob.h"
struct cached_dir_entry {
- struct list_head head;
- loff_t pos;
+ struct list_head head; /* added to cached_dir_entries::list */
+ loff_t pos; /* used to update ctx::pos */
+ /* Used on final dir_emit() call. */
char *name;
int len;
-
unsigned long long unique_id; /* cifs_fattr::cf_uniqueid */
unsigned int type; /* cifs_fattr::cf_dtype */
};
@@ -20,7 +20,7 @@ struct cached_dir_entries {
struct list_head list;
struct mutex lock;
- /* Never dereferenced, only used to make sure we only take entries from a single context. */
+ /* Never dereferenced, only used to make sure we only take entries from the same context. */
const struct dir_context *ctx;
bool valid;
@@ -29,33 +29,36 @@ struct cached_dir_entries {
};
struct cached_dir {
- /*
- * Private/internal use only!
- */
+ /* Private/internal use only! */
struct list_head head;
seqlock_t seq;
+ struct kref ref;
const char *path;
struct dentry *dentry;
+ unsigned long time; /* expiration time (based on dir_cache_timeout) */
+ u8 *lease_key; /* cifs_fid::lease_key */
+ /* Cached entries for this cached directory. */
+ struct cached_dir_entries entries;
- /*
- * Public/used by callers.
- */
- unsigned long time;
+ /* Public/used by callers. */
unsigned long long pfid; /* cifs_fid::persistent_fid */
unsigned long long vfid; /* cifs_fid::volatile_fid */
- u8 *lease_key; /* cifs_fid::lease_key */
-
struct smb2_file_all_info *info;
+};
- /* Cached entries for this cached directory. */
- struct cached_dir_entries dir_entries;
+struct cached_lease_break {
+ struct list_head head;
+ u8 *lease_key;
+ atomic_t recvs;
+ atomic_t acks;
+ unsigned long time;
};
-/* Management */
struct cached_dirs {
- struct list_head entries;
+ struct list_head list;
+ struct list_head lease_breaks;
seqlock_t seq;
/* tcon backreference */
@@ -65,31 +68,28 @@ struct cached_dirs {
struct delayed_work cleanup_work;
};
-enum {
- CDIR_FIND_PATH,
- CDIR_FIND_DENTRY,
- CDIR_FIND_LEASEKEY,
- CDIR_FIND_CDE,
-};
-
struct cached_dir *cached_dir_get_path(struct cached_dirs *cdirs, const char *path);
-struct cached_dir *cached_dir_get_dentry(struct cached_dirs *cdirs, const struct dentry *dentry);
-struct cached_dir *cached_dir_get_leasekey(struct cached_dirs *cdirs, const u8 *lease_key);
-struct cached_dir *cached_dir_get_open(struct cached_dirs *cdirs, const char *path,
- struct cifs_sb_info *cifs_sb);
-void cached_dir_put(struct cached_dirs *cdirs, struct cached_dir *cdir);
-void cached_dir_drop(struct cached_dirs *cdirs, struct cached_dir *cdir);
-bool cached_dir_expired(struct cached_dir *cdir);
-bool cached_dir_is_older(struct cached_dir *cdir, unsigned long time);
-void cached_dir_lease_break(struct cached_dirs *cdirs, u8 lease_key[16]);
+struct cached_dir *cached_dir_open(struct cached_dirs *cdirs, const char *path,
+ struct cifs_sb_info *cifs_sb);
+#if 0
+int cached_dir_add(struct cached_dirs *cdirs, const char *path, struct dentry *dentry,
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *fid,
+ struct smb2_file_all_info *info);
+#endif
+void cached_dir_close(struct cached_dirs *cdirs, struct cached_dir *cdir);
+void cached_dir_drop(struct cached_dirs *cdirs, const char *path);
+void cached_dir_lease_break(struct cached_dirs *cdirs, u8 *lease_key);
+
+bool cached_dir_is_expired(struct cached_dir *cdir);
+bool cached_dir_check_expired_path(struct cached_dirs *cdirs, const char *path);
+bool cached_dir_check_expired_dentry(struct cached_dirs *cdirs, struct dentry *dentry, unsigned long time);
void cached_dir_init(struct cifs_tcon *tcon);
void cached_dir_exit(struct cifs_tcon *tcon);
void cached_dir_destroy(struct cifs_sb_info *cifs_sb);
+void cached_dir_cleanup(struct cifs_tcon *tcon, bool free);
-// subdirs
-struct cached_dir_entries *cached_dir_entries_get(struct cached_dirs *cdirs, struct cached_dir *cdir, struct dir_context *ctx);
-int cached_dir_entry_emit(struct cached_dir_entries *cde, struct dir_context *ctx, struct file *file);
-void cached_dir_entry_add(struct cached_dir_entries *cde, struct dir_context *ctx,
- const char *name, int len, struct cifs_fattr *fattr);
+bool cached_dir_emit_entries(struct cached_dir *cdir, struct dir_context *ctx, struct file *file);
+void cached_dir_add_entry(struct cached_dir *cdir, struct dir_context *ctx, const char *name,
+ int len, struct cifs_fattr *fattr);
#endif /* _CACHED_DIR_H */
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 4ed1d12900c8..707e0abd117b 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -138,19 +138,19 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_printf(m, "\n\tCached directories (%d):", atomic_read(&cdirs->count));
- list_for_each_entry_safe(cdir, q, &cdirs->entries, head) {
+ list_for_each_entry_safe(cdir, q, &cdirs->list, head) {
struct cached_dir_entry *de, *dq;
seq_printf(m, "\n\t\tPath: %s%s, ", tcon->tree_name, cdir->path);
- if (cached_dir_expired(cdir)) {
+ if (cached_dir_is_expired(cdir)) {
seq_printf(m, "(expired)");
} else {
seq_printf(m, "(expires in %us)", dir_cache_timeout - (jiffies_to_msecs(jiffies - cdir->time) / 1000));
if (cdir->lease_key)
seq_printf(m, "\n\t\tLease key: %*ph", 16, cdir->lease_key);
}
- seq_printf(m, "\t\tEntries:");
- list_for_each_entry_safe(de, dq, &cdir->dir_entries.list, head)
+ seq_printf(m, "\n\t\tEntries:");
+ list_for_each_entry_safe(de, dq, &cdir->entries.list, head)
seq_printf(m, "\n\t\t\tname: %s (%p)", de->name, de);
seq_printf(m, "\n\t\t----");
}
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index e4aa7694ea86..c265d389f6b4 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -883,7 +883,7 @@ static const struct super_operations cifs_super_ops = {
* Return dentry with refcount + 1 on success and NULL otherwise.
*/
static struct dentry *
-cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
+cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb, const char **path)
{
struct dentry *dentry;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -891,8 +891,12 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
char *s, *p;
char sep;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ *path = NULL;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+ *path = kstrdup_const("", GFP_KERNEL);
return dget(sb->s_root);
+ }
full_path = cifs_build_path_to_root(ctx, cifs_sb,
cifs_sb_master_tcon(cifs_sb), 0);
@@ -929,6 +933,8 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
+
+ *path = kstrdup_const(full_path, GFP_KERNEL);
kfree(full_path);
return dentry;
}
@@ -948,6 +954,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct cifs_sb_info *cifs_sb;
struct super_block *sb;
struct dentry *root;
+ const char *path;
int rc;
if (cifsFYI) {
@@ -1014,13 +1021,30 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
sb->s_flags |= SB_ACTIVE;
}
- root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
+ root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb, &path);
if (IS_ERR(root))
goto out_super;
- if (cifs_sb)
+ if (cifs_sb) {
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cached_dir *cdir = cached_dir_get_path(tcon->cdirs, path);
+
cifs_sb->root = dget(root);
+ // cdir not NULL implies path not NULL
+ if (cdir && !IS_ERR(cdir)) {
+ if (WARN_ON(cdir->dentry))
+ dput(cdir->dentry);
+ write_seqlock(&cdir->seq);
+ cdir->dentry = dget(cifs_sb->root);
+ write_sequnlock(&cdir->seq);
+
+ cached_dir_close(tcon->cdirs, cdir);
+ }
+ }
+
+ kfree(path);
+
cifs_dbg(FYI, "dentry root is: %p\n", root);
return root;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index f2e2d58b6fb7..71794f6357f7 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -2502,9 +2502,6 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
/* tc_count can never go negative */
WARN_ON(tcon->tc_count < 0);
- kfree(tcon->cdirs);
- tcon->cdirs = NULL;
-
list_del_init(&tcon->tcon_list);
tcon->status = TID_EXITING;
spin_unlock(&tcon->tc_lock);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index d1e95632ac54..4cf328c2964c 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -54,7 +54,7 @@ cifs_build_path_to_root(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_s
else
dfsplen = 0;
- full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
+ full_path = kzalloc(dfsplen + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return full_path;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index b6140e22b27b..5c74a8c6fcfd 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -381,7 +381,7 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
}
spin_unlock(&tcon->open_file_lock);
- cached_dir_exit(tcon);
+ cached_dir_cleanup(tcon, false);
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_FILES_INVALIDATE)
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 9223b0d9051c..ec7a23652138 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2588,7 +2588,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- struct cached_dir *cdir;
if (cifs_i->time == 0)
return true;
@@ -2599,15 +2598,8 @@ cifs_dentry_needs_reval(struct dentry *dentry)
if (!lookupCacheEnabled)
return true;
- cdir = cached_dir_get_dentry(tcon->cdirs, dentry->d_parent);
- if (cdir) {
- if (!cached_dir_is_older(cdir, cifs_i->time)) {
- cached_dir_put(tcon->cdirs, cdir);
- return false;
- }
-
- cached_dir_put(tcon->cdirs, cdir);
- }
+ if (!cached_dir_check_expired_dentry(tcon->cdirs, dentry->d_parent, cifs_i->time))
+ return false;
/*
* depending on inode type, check if attribute caching disabled for
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 75478591a5c7..0cb243acb7c1 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -154,9 +154,12 @@ tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
return;
}
trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
- atomic_dec(&tconInfoAllocCount);
+
+ cached_dir_cleanup(tcon, true);
kfree(tcon->cdirs);
tcon->cdirs = NULL;
+
+ atomic_dec(&tconInfoAllocCount);
kfree(tcon->nativeFileSystem);
kfree_sensitive(tcon->password);
kfree(tcon->origin_fullpath);
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 7b0f80699247..1311caa93fee 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -813,7 +813,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
static bool cifs_dir_emit(struct dir_context *ctx,
const char *name, int namelen,
struct cifs_fattr *fattr,
- struct cached_dir_entries *cde)
+ struct cached_dir *cdir)
{
bool rc;
ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
@@ -822,7 +822,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
if (!rc)
return rc;
- cached_dir_entry_add(cde, ctx, name, namelen, fattr);
+ cached_dir_add_entry(cdir, ctx, name, namelen, fattr);
return rc;
}
@@ -830,7 +830,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
static int cifs_filldir(char *find_entry, struct file *file,
struct dir_context *ctx,
char *scratch_buf, unsigned int max_len,
- struct cached_dir_entries *cde)
+ struct cached_dir *cdir)
{
struct cifsFileInfo *file_info = file->private_data;
struct super_block *sb = file_inode(file)->i_sb;
@@ -919,7 +919,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_prime_dcache(file_dentry(file), &name, &fattr);
- return !cifs_dir_emit(ctx, name.name, name.len, &fattr, cde);
+ return !cifs_dir_emit(ctx, name.name, name.len, &fattr, cdir);
}
@@ -939,16 +939,13 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
const char *full_path;
void *page = alloc_dentry_path();
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
- struct cached_dir_entries *cde = NULL;
struct cached_dir *cdir = NULL;
- xid = get_xid();
-
full_path = build_path_from_dentry(file_dentry(file), page);
- if (IS_ERR(full_path)) {
- rc = PTR_ERR(full_path);
- goto rddir2_exit;
- }
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
+
+ xid = get_xid();
if (file->private_data == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
@@ -960,27 +957,23 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
tcon = tlink_tcon(cifsFile->tlink);
}
- cdir = cached_dir_get_open(tcon->cdirs, full_path, cifs_sb);
cifs_put_tlink(tlink);
- if (!cdir)
- goto cache_not_found;
- cde = cached_dir_entries_get(tcon->cdirs, cdir, ctx);
- if (!cde)
+ cdir = cached_dir_open(tcon->cdirs, full_path, cifs_sb);
+ if (!cdir) {
+ pr_err("%s: (1) cdir not found for path='%s' (unexpected because remote open)\n", __func__, full_path);
goto cache_not_found;
+ }
- rc = cached_dir_entry_emit(cde, ctx, file);
- if (!rc)
+ if (!cached_dir_emit_entries(cdir, ctx, file))
goto rddir2_exit;
- /* Drop the cache while calling initiate_cifs_search and
- * find_cifs_entry in case there will be reconnects during
- * query_directory.
+ /*
+ * Drop the cache while calling initiate_cifs_search and find_cifs_entry in case there
+ * will be reconnects during query_directory.
*/
- cached_dir_put(tcon->cdirs, cdir);
+ cached_dir_close(tcon->cdirs, cdir);
cdir = NULL;
- cde = NULL;
-
cache_not_found:
/*
* Ensure FindFirst doesn't fail before doing filldir() for '.' and
@@ -1015,19 +1008,23 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
tcon = tlink_tcon(cifsFile->tlink);
rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
&current_entry, &num_to_fill);
- cdir = cached_dir_get_open(tcon->cdirs, full_path, cifs_sb);
- if (!cdir) {
+ if (rc) {
cifs_dbg(FYI, "fce error %d\n", rc);
goto rddir2_exit;
}
- cde = &cdir->dir_entries;
+ cdir = cached_dir_open(tcon->cdirs, full_path, cifs_sb);
+ if (!cdir) {
+ pr_err("%s: (2) cdir not found for path='%s' (unexpected because remote open)\n", __func__, full_path);
+ goto rddir2_exit;
+ }
if (!current_entry) {
- mutex_lock(&cde->lock);
- if ((!cde->ctx || cde->ctx == ctx) && !cde->valid && cde->pos == ctx->pos)
- cde->valid = true;
- mutex_unlock(&cde->lock);
+ mutex_lock(&cdir->entries.lock);
+ if ((!cdir->entries.ctx || cdir->entries.ctx == ctx) &&
+ !cdir->entries.valid && cdir->entries.pos == ctx->pos)
+ cdir->entries.valid = true;
+ mutex_unlock(&cdir->entries.lock);
cifs_dbg(FYI, "Could not find entry\n");
goto rddir2_exit;
@@ -1059,7 +1056,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
*/
*tmp_buf = 0;
rc = cifs_filldir(current_entry, file, ctx,
- tmp_buf, max_len, cde);
+ tmp_buf, max_len, cdir);
if (rc) {
if (rc > 0)
rc = 0;
@@ -1068,11 +1065,11 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
ctx->pos++;
// update count
- if (cde) {
- mutex_lock(&cde->lock);
- if (cde->ctx == ctx && !cde->valid)
- cde->pos++;
- mutex_unlock(&cde->lock);
+ if (cdir) {
+ mutex_lock(&cdir->entries.lock);
+ if (cdir->entries.ctx == ctx && !cdir->entries.valid)
+ cdir->entries.pos++;
+ mutex_unlock(&cdir->entries.lock);
}
if (ctx->pos ==
@@ -1090,7 +1087,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
rddir2_exit:
if (tcon)
- cached_dir_put(tcon->cdirs, cdir);
+ cached_dir_close(tcon->cdirs, cdir);
free_dentry_path(page);
free_xid(xid);
return rc;
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 1c84058c4dab..8ab20e56c97e 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -911,7 +911,7 @@ int smb2_query_path_info(const unsigned int xid,
struct kvec in_iov[3], out_iov[5] = {};
struct cifs_open_parms oparms;
struct cifsFileInfo *cfile;
- struct cached_dir *cdir;
+ struct cached_dir *cdir = NULL;
__u32 create_options = 0;
int out_buftype[5] = {};
struct smb2_hdr *hdr;
@@ -930,36 +930,35 @@ int smb2_query_path_info(const unsigned int xid,
* is fast enough (always using the compounded version).
*/
if (!tcon->posix_extensions) {
- if (*full_path) {
- rc = -ENOENT;
- } else {
- rc = 0;
- cdir = cached_dir_get_open(tcon->cdirs, full_path, cifs_sb);
- if (!cdir)
- rc = -ENOENT;
+ cmds[num_cmds++] = SMB2_OP_QUERY_INFO;
+ if (*full_path)
+ goto no_cache;
+
+ cdir = cached_dir_open(tcon->cdirs, full_path, cifs_sb);
+ if (!cdir) {
+			pr_err("%s: cache dir not found for path='\\'\n", __func__);
+ goto no_cache;
}
/* If it is a root and its handle is cached then use it */
- if (!rc) {
- if (cdir->info) {
- data->fi = *cdir->info;
- } else {
- rc = SMB2_query_info(xid, tcon, cdir->pfid, cdir->vfid, &data->fi);
- if (!rc) {
- cdir->info = kmemdup(&data->fi, sizeof(data->fi), GFP_KERNEL);
- if (!cdir->info)
- rc = -ENOMEM;
- }
+ if (cdir->info) {
+ data->fi = *cdir->info;
+ rc = 0;
+ } else {
+ rc = SMB2_query_info(xid, tcon, cdir->pfid, cdir->vfid, &data->fi);
+ if (!rc) {
+ cdir->info = kmemdup(&data->fi, sizeof(data->fi), GFP_KERNEL);
+ if (!cdir->info)
+ rc = -ENOMEM;
}
-
- cached_dir_put(tcon->cdirs, cdir);
- return rc;
}
- cmds[num_cmds++] = SMB2_OP_QUERY_INFO;
+
+ cached_dir_close(tcon->cdirs, cdir);
+ return rc;
} else {
cmds[num_cmds++] = SMB2_OP_POSIX_QUERY_INFO;
}
-
+no_cache:
in_iov[0].iov_base = data;
in_iov[0].iov_len = sizeof(*data);
in_iov[1] = in_iov[0];
@@ -1094,11 +1093,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
struct cifs_open_parms oparms;
- struct cached_dir *cdir;
- cdir = cached_dir_get_path(tcon->cdirs, name);
- if (cdir)
- cached_dir_drop(tcon->cdirs, cdir);
+ cached_dir_drop(tcon->cdirs, name);
oparms = CIFS_OPARMS(cifs_sb, tcon, name, DELETE,
FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE);
@@ -1166,12 +1162,9 @@ int smb2_rename_path(const unsigned int xid,
struct cifs_sb_info *cifs_sb)
{
struct cifsFileInfo *cfile;
- struct cached_dir *cdir;
__u32 co = file_create_options(source_dentry);
- cdir = cached_dir_get_path(tcon->cdirs, from_name);
- if (cdir)
- cached_dir_drop(tcon->cdirs, cdir);
+ cached_dir_drop(tcon->cdirs, from_name);
cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 2cf28da64ac5..5a336ebfa533 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -850,12 +850,13 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *pfid, fid = {};
struct cached_dir *cdir;
- cdir = cached_dir_get_open(tcon->cdirs, "", cifs_sb);
+ cdir = cached_dir_open(tcon->cdirs, "", cifs_sb);
if (cdir) {
fid.persistent_fid = cdir->pfid;
fid.volatile_fid = cdir->vfid;
memcpy(fid.lease_key, cdir->lease_key, SMB2_LEASE_KEY_SIZE);
} else {
+ pr_err("%s: !!!! cache dir not found for path='\\' -- shouldn't happen !!!\n", __func__);
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.path = "",
@@ -883,7 +884,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_QFS_attr(xid, tcon, pfid->persistent_fid, pfid->volatile_fid,
FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
if (cdir)
- cached_dir_put(tcon->cdirs, cdir);
+ cached_dir_close(tcon->cdirs, cdir);
else
SMB2_close(xid, tcon, pfid->persistent_fid, pfid->volatile_fid);
}
@@ -929,19 +930,25 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct kvec err_iov = {};
struct cifs_fid fid;
- struct cached_dir *cdir;
bool islink;
int rc, rc2;
- cdir = cached_dir_get_path(tcon->cdirs, full_path);
- if (cdir) {
- bool expired = cached_dir_expired(cdir);
+ if (!cached_dir_check_expired_path(tcon->cdirs, full_path))
+ return 0;
- cached_dir_put(tcon->cdirs, cdir);
+#if 0 // lookup + open (leasebreak?)
+ cdir = cached_dir_open(tcon->cdirs, full_path, cifs_sb);
+ if (cdir) {
+ bool expired = cached_dir_is_expired(cdir);
+ cached_dir_close(tcon->cdirs, cdir);
if (!expired)
return 0;
}
+#endif
+
+ pr_err("%s: cached dir for path='%s' not found or expired, remote opening...\n",
+ __func__, *full_path ? full_path : "\\");
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
if (!utf16_path)
@@ -2678,6 +2685,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
__le16 *utf16_path;
struct cached_dir *cdir = NULL;
int retries = 0, cur_sleep = 1;
+ bool using_cdir = false;
replay_again:
/* reinitialize for possible replay */
@@ -2706,8 +2714,13 @@ replay_again:
/*
* We can only call this for things we know are directories.
*/
- if (!strcmp(path, ""))
- cdir = cached_dir_get_open(tcon->cdirs, path, cifs_sb);
+ if (!strcmp(path, "")) {
+ cdir = cached_dir_open(tcon->cdirs, path, cifs_sb);
+ if (!cdir)
+ pr_err("%s: cache dir not found for path='\\' (unexpected because of remote open)\n", __func__);
+ else
+ using_cdir = true;
+ }
rqst[0].rq_iov = vars->open_iov;
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
@@ -2799,7 +2812,11 @@ replay_again:
SMB2_close_free(&rqst[2]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
- cached_dir_put(tcon->cdirs, cdir);
+ cached_dir_close(tcon->cdirs, cdir);
+ if (!cdir && using_cdir) {
+ pr_err("%s: was using cdir, it's gone now, retrying...\n", __func__);
+ rc = -EAGAIN;
+ }
kfree(vars);
out_free_path:
kfree(utf16_path);
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c74574863e54..929f95f2c145 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -2201,7 +2201,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
}
spin_unlock(&ses->chan_lock);
- cached_dir_exit(tcon);
+ cached_dir_cleanup(tcon, false);
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
(void **) &req,