author | David S. Miller <davem@davemloft.net> | 2020-07-25 17:49:04 -0700
committer | David S. Miller <davem@davemloft.net> | 2020-07-25 17:49:04 -0700
commit | a57066b1a01977a646145f4ce8dfb4538b08368a (patch)
tree | 57c2b4fa2fc48e687a1820b9bf4ef4f4363be0f9 /drivers/base/regmap/regmap-debugfs.c
parent | dfecd3e00cd32b2a6d1cfdb30b513dd42575ada3 (diff)
parent | 04300d66f0a06d572d9f2ad6768c38cabde22179 (diff)
download | linux-a57066b1a01977a646145f4ce8dfb4538b08368a.tar.gz / .tar.bz2 / .zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
The UDP reuseport conflict was a little bit tricky.
The net-next code, via bpf-next, extracted the reuseport handling
into a helper so that the BPF sk lookup code could invoke it.
At the same time, commit efc6b6f6c3113e8b203b9debfb72d81e0f3dcace changed
the reuseport handling of unconnected sockets so that the reuseport
result is carried forward into the rest of the lookup loop when we do
not return immediately.
This requires moving the reuseport_has_conns() logic into the callers.
While we are here, get rid of inline directives as they do not belong
in foo.c files.
The other changes were cases of more straightforward overlapping
modifications.
Signed-off-by: David S. Miller <davem@davemloft.net>
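
For readers tracing the resolution described in the commit message, the shape of the resolved scoring loop in net/ipv4/udp.c is roughly the following. This is a paraphrased sketch, not the literal merged hunk: compute_score(), lookup_reuseport() and reuseport_has_conns() are the helpers the message refers to, but their signatures and the surrounding details here are approximations.

```c
/*
 * Rough, paraphrased sketch of the resolved scoring loop (not the literal
 * merged code).  It assumes the static helpers compute_score() and
 * lookup_reuseport() from net/ipv4/udp.c around the time of this merge.
 */
static struct sock *udp4_lookup_loop_sketch(struct net *net,
					    __be32 saddr, __be16 sport,
					    __be32 daddr, unsigned int hnum,
					    int dif, int sdif,
					    struct udp_hslot *hslot2,
					    struct sk_buff *skb)
{
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			/* Ask the reuseport group for its pick first. */
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if the group has connections. */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			/* Carry the reuseport pick (or this socket) forward. */
			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}
```

The point of the resolution is visible in the middle of the loop: the reuseport pick only short-circuits the search when the group has no connected sockets; otherwise it becomes the current best match and scoring continues, which is why the reuseport_has_conns() check now sits in the caller rather than in the extracted helper.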
Diffstat (limited to 'drivers/base/regmap/regmap-debugfs.c')
-rw-r--r-- | drivers/base/regmap/regmap-debugfs.c | 52
1 file changed, 29 insertions, 23 deletions
```diff
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 089e5dc7144a..f58baff2be0a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 {
 	struct regmap *map = container_of(file->private_data,
 					  struct regmap, cache_only);
-	ssize_t result;
-	bool was_enabled, require_sync = false;
+	bool new_val, require_sync = false;
 	int err;
 
-	map->lock(map->lock_arg);
+	err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malforned data like debugfs_write_file_bool() */
+	if (err)
+		return count;
 
-	was_enabled = map->cache_only;
+	err = debugfs_file_get(file->f_path.dentry);
+	if (err)
+		return err;
 
-	result = debugfs_write_file_bool(file, user_buf, count, ppos);
-	if (result < 0) {
-		map->unlock(map->lock_arg);
-		return result;
-	}
+	map->lock(map->lock_arg);
 
-	if (map->cache_only && !was_enabled) {
+	if (new_val && !map->cache_only) {
 		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-	} else if (!map->cache_only && was_enabled) {
+	} else if (!new_val && map->cache_only) {
 		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
 		require_sync = true;
 	}
+	map->cache_only = new_val;
 
 	map->unlock(map->lock_arg);
+	debugfs_file_put(file->f_path.dentry);
 
 	if (require_sync) {
 		err = regcache_sync(map);
@@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 			dev_err(map->dev, "Failed to sync cache %d\n", err);
 	}
 
-	return result;
+	return count;
 }
 
 static const struct file_operations regmap_cache_only_fops = {
@@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
 {
 	struct regmap *map = container_of(file->private_data,
 					  struct regmap, cache_bypass);
-	ssize_t result;
-	bool was_enabled;
+	bool new_val;
+	int err;
 
-	map->lock(map->lock_arg);
+	err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malforned data like debugfs_write_file_bool() */
+	if (err)
+		return count;
 
-	was_enabled = map->cache_bypass;
+	err = debugfs_file_get(file->f_path.dentry);
+	if (err)
+		return err;
 
-	result = debugfs_write_file_bool(file, user_buf, count, ppos);
-	if (result < 0)
-		goto out;
+	map->lock(map->lock_arg);
 
-	if (map->cache_bypass && !was_enabled) {
+	if (new_val && !map->cache_bypass) {
 		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-	} else if (!map->cache_bypass && was_enabled) {
+	} else if (!new_val && map->cache_bypass) {
 		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
 	}
+	map->cache_bypass = new_val;
 
-out:
 	map->unlock(map->lock_arg);
+	debugfs_file_put(file->f_path.dentry);
 
-	return result;
+	return count;
 }
 
 static const struct file_operations regmap_cache_bypass_fops = {
```
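
The regmap hunks above replace debugfs_write_file_bool() with an open-coded kstrtobool_from_user() parse plus debugfs_file_get()/debugfs_file_put(), so the boolean is updated under the regmap lock instead of before taking it. Below is a minimal sketch of the same pattern for a generic driver; struct foo_dev and the foo_* names are hypothetical, and only the debugfs/kstrto* calls are real kernel APIs.

```c
/*
 * Sketch of the write-handler pattern the diff adopts: parse the boolean
 * with kstrtobool_from_user(), pin the debugfs file with
 * debugfs_file_get()/debugfs_file_put(), and update the flag under the
 * driver's own lock.  "foo" names are illustrative only.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

struct foo_dev {
	struct mutex lock;
	bool enabled;
};

static ssize_t foo_enable_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct foo_dev *foo = file->private_data;
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	if (err)
		return count;	/* ignore malformed input, as the diff does */

	/* Pin the debugfs file so it cannot be removed while in use. */
	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	mutex_lock(&foo->lock);
	foo->enabled = new_val;
	mutex_unlock(&foo->lock);

	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations foo_enable_fops = {
	.open = simple_open,
	.write = foo_enable_write,
	.llseek = default_llseek,
};
```

The file would be registered with something like debugfs_create_file("enable", 0600, parent, foo, &foo_enable_fops), where "parent" is a hypothetical debugfs directory; simple_open() then makes the foo pointer available as file->private_data, and the get/put pair mirrors the removal protection that the generic bool helper provides internally.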