Diffstat (limited to 'drivers/base')
32 files changed, 2063 insertions, 850 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 6f04b831a5c0..2b8fd6bb7da0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -230,4 +230,16 @@ config GENERIC_ARCH_NUMA
           Enable support for generic NUMA implementation. Currently, RISC-V
           and ARM64 use it.
 
+config FW_DEVLINK_SYNC_STATE_TIMEOUT
+        bool "sync_state() behavior defaults to timeout instead of strict"
+        help
+          This is build time equivalent of adding kernel command line parameter
+          "fw_devlink.sync_state=timeout". Give up waiting on consumers and
+          call sync_state() on any devices that haven't yet received their
+          sync_state() calls after deferred_probe_timeout has expired or by
+          late_initcall() if !CONFIG_MODULES. You should almost always want to
+          select N here unless you have already successfully tested with the
+          command line option on every system/board your kernel is expected to
+          work on.
+
 endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b1c1dd38ab01..b741b5ba82bd 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -835,18 +835,19 @@ void __init init_cpu_topology(void)
         if (ret) {
                 /*
                  * Discard anything that was parsed if we hit an error so we
-                 * don't use partial information.
+                 * don't use partial information. But do not return yet to give
+                 * arch-specific early cache level detection a chance to run.
                  */
                 reset_cpu_topology();
-                return;
         }
 
         for_each_possible_cpu(cpu) {
                 ret = fetch_cache_info(cpu);
-                if (ret) {
+                if (!ret)
+                        continue;
+                else if (ret != -ENOENT)
                         pr_err("Early cacheinfo failed, ret = %d\n", ret);
-                        break;
-                }
+                return;
         }
 }
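The FW_DEVLINK_SYNC_STATE_TIMEOUT entry above is a build-time switch for behavior that already exists as a boot parameter; per the help text, the two equivalent ways to opt into the relaxed sync_state() handling are:

    fw_devlink.sync_state=timeout              # kernel command line, per boot
    CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT=y     # build time, added by this patch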
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 726a12a244c0..eb4c0ace9242 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -27,11 +27,13 @@
  *        on this bus.
  * @bus - pointer back to the struct bus_type that this structure is associated
  *        with.
+ * @dev_root: Default device to use as the parent.
  *
  * @glue_dirs - "glue" directory to put in-between the parent device to
  *              avoid namespace conflicts
  * @class - pointer back to the struct class that this structure is associated
  *          with.
+ * @lock_key: Lock class key for use by the lock validator
  *
  * This structure is the one that is the actual kobject allowing struct
  * bus_type/class to be statically allocated safely.  Nothing outside of the
@@ -48,10 +50,11 @@ struct subsys_private {
         struct klist klist_drivers;
         struct blocking_notifier_head bus_notifier;
         unsigned int drivers_autoprobe:1;
-        struct bus_type *bus;
+        const struct bus_type *bus;
+        struct device *dev_root;
 
         struct kset glue_dirs;
-        struct class *class;
+        const struct class *class;
 
         struct lock_class_key lock_key;
 };
@@ -70,6 +73,8 @@ static inline void subsys_put(struct subsys_private *sp)
                 kset_put(&sp->subsys);
 }
 
+struct subsys_private *class_to_subsys(const struct class *class);
+
 struct driver_private {
         struct kobject kobj;
         struct klist klist_devices;
@@ -122,69 +127,73 @@ struct device_private {
         container_of(obj, struct device_private, knode_class)
 
 /* initialisation functions */
-extern int devices_init(void);
-extern int buses_init(void);
-extern int classes_init(void);
-extern int firmware_init(void);
+int devices_init(void);
+int buses_init(void);
+int classes_init(void);
+int firmware_init(void);
 #ifdef CONFIG_SYS_HYPERVISOR
-extern int hypervisor_init(void);
+int hypervisor_init(void);
 #else
 static inline int hypervisor_init(void) { return 0; }
 #endif
-extern int platform_bus_init(void);
-extern void cpu_dev_init(void);
-extern void container_dev_init(void);
+int platform_bus_init(void);
+void cpu_dev_init(void);
+void container_dev_init(void);
 #ifdef CONFIG_AUXILIARY_BUS
-extern void auxiliary_bus_init(void);
+void auxiliary_bus_init(void);
 #else
 static inline void auxiliary_bus_init(void) { }
 #endif
 
 struct kobject *virtual_device_parent(struct device *dev);
 
-extern int bus_add_device(struct device *dev);
-extern void bus_probe_device(struct device *dev);
-extern void bus_remove_device(struct device *dev);
+int bus_add_device(struct device *dev);
+void bus_probe_device(struct device *dev);
+void bus_remove_device(struct device *dev);
 void bus_notify(struct device *dev, enum bus_notifier_event value);
 bool bus_is_registered(const struct bus_type *bus);
 
-extern int bus_add_driver(struct device_driver *drv);
-extern void bus_remove_driver(struct device_driver *drv);
-extern void device_release_driver_internal(struct device *dev,
-                                           struct device_driver *drv,
-                                           struct device *parent);
+int bus_add_driver(struct device_driver *drv);
+void bus_remove_driver(struct device_driver *drv);
+void device_release_driver_internal(struct device *dev, struct device_driver *drv,
+                                    struct device *parent);
 
-extern void driver_detach(struct device_driver *drv);
-extern void driver_deferred_probe_del(struct device *dev);
-extern void device_set_deferred_probe_reason(const struct device *dev,
-                                             struct va_format *vaf);
+void driver_detach(struct device_driver *drv);
+void driver_deferred_probe_del(struct device *dev);
+void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
 static inline int driver_match_device(struct device_driver *drv,
                                       struct device *dev)
 {
         return drv->bus->match ? drv->bus->match(dev, drv) : 1;
 }
 
-extern int driver_add_groups(struct device_driver *drv,
-                             const struct attribute_group **groups);
-extern void driver_remove_groups(struct device_driver *drv,
-                                 const struct attribute_group **groups);
+static inline void dev_sync_state(struct device *dev)
+{
+        if (dev->bus->sync_state)
+                dev->bus->sync_state(dev);
+        else if (dev->driver && dev->driver->sync_state)
+                dev->driver->sync_state(dev);
+}
+
+int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups);
+void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
 void device_driver_detach(struct device *dev);
 
-extern int devres_release_all(struct device *dev);
-extern void device_block_probing(void);
-extern void device_unblock_probing(void);
-extern void deferred_probe_extend_timeout(void);
-extern void driver_deferred_probe_trigger(void);
+int devres_release_all(struct device *dev);
+void device_block_probing(void);
+void device_unblock_probing(void);
+void deferred_probe_extend_timeout(void);
+void driver_deferred_probe_trigger(void);
 const char *device_get_devnode(const struct device *dev, umode_t *mode,
                                kuid_t *uid, kgid_t *gid, const char **tmp);
 
 /* /sys/devices directory */
 extern struct kset *devices_kset;
-extern void devices_kset_move_last(struct device *dev);
+void devices_kset_move_last(struct device *dev);
 
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-extern void module_add_driver(struct module *mod, struct device_driver *drv);
-extern void module_remove_driver(struct device_driver *drv);
+void module_add_driver(struct module *mod, struct device_driver *drv);
+void module_remove_driver(struct device_driver *drv);
 #else
 static inline void module_add_driver(struct module *mod,
                                      struct device_driver *drv) { }
@@ -192,23 +201,34 @@ static inline void module_remove_driver(struct device_driver *drv) { }
 #endif
 
 #ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_init(void);
+int devtmpfs_init(void);
 #else
 static inline int devtmpfs_init(void) { return 0; }
 #endif
 
+#ifdef CONFIG_BLOCK
+extern struct class block_class;
+static inline bool is_blockdev(struct device *dev)
+{
+        return dev->class == &block_class;
+}
+#else
+static inline bool is_blockdev(struct device *dev) { return false; }
+#endif
+
 /* Device links support */
-extern int device_links_read_lock(void);
-extern void device_links_read_unlock(int idx);
-extern int device_links_read_lock_held(void);
-extern int device_links_check_suppliers(struct device *dev);
-extern void device_links_force_bind(struct device *dev);
-extern void device_links_driver_bound(struct device *dev);
-extern void device_links_driver_cleanup(struct device *dev);
-extern void device_links_no_driver(struct device *dev);
-extern bool device_links_busy(struct device *dev);
-extern void device_links_unbind_consumers(struct device *dev);
-extern void fw_devlink_drivers_done(void);
+int device_links_read_lock(void);
+void device_links_read_unlock(int idx);
+int device_links_read_lock_held(void);
+int device_links_check_suppliers(struct device *dev);
+void device_links_force_bind(struct device *dev);
+void device_links_driver_bound(struct device *dev);
+void device_links_driver_cleanup(struct device *dev);
+void device_links_no_driver(struct device *dev);
+bool device_links_busy(struct device *dev);
+void device_links_unbind_consumers(struct device *dev);
+void fw_devlink_drivers_done(void);
+void fw_devlink_probing_done(void);
 
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
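The new dev_sync_state() helper above gives the bus's sync_state() callback precedence over the driver's. For context, a driver opts in by filling in the sync_state member of its struct device_driver; the following is a hypothetical sketch, not code from this series:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /*
     * Hypothetical provider driver: it keeps bootloader-enabled resources
     * on until every consumer has probed (or the fw_devlink timeout fires),
     * then releases them in sync_state().
     */
    static int foo_probe(struct platform_device *pdev)
    {
            return 0;       /* leave boot-time state untouched for now */
    }

    static void foo_sync_state(struct device *dev)
    {
            /* all consumers have probed: safe to reclaim unused resources */
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .driver = {
                    .name       = "foo",
                    .sync_state = foo_sync_state,
            },
    };
    module_platform_driver(foo_driver);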
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dd4b82d7510f..84a21084d67d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -84,7 +84,7 @@ done:
         return sp;
 }
 
-static struct bus_type *bus_get(struct bus_type *bus)
+static const struct bus_type *bus_get(const struct bus_type *bus)
 {
         struct subsys_private *sp = bus_to_subsys(bus);
 
@@ -233,7 +233,7 @@ static const struct kset_uevent_ops bus_uevent_ops = {
 static ssize_t unbind_store(struct device_driver *drv, const char *buf,
                             size_t count)
 {
-        struct bus_type *bus = bus_get(drv->bus);
+        const struct bus_type *bus = bus_get(drv->bus);
         struct device *dev;
         int err = -ENODEV;
 
@@ -256,7 +256,7 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
 static ssize_t bind_store(struct device_driver *drv, const char *buf,
                           size_t count)
 {
-        struct bus_type *bus = bus_get(drv->bus);
+        const struct bus_type *bus = bus_get(drv->bus);
         struct device *dev;
         int err = -ENODEV;
 
@@ -274,7 +274,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
 }
 static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
 
-static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
+static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf)
 {
         struct subsys_private *sp = bus_to_subsys(bus);
         int ret;
@@ -287,7 +287,7 @@ static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
         return ret;
 }
 
-static ssize_t drivers_autoprobe_store(struct bus_type *bus,
+static ssize_t drivers_autoprobe_store(const struct bus_type *bus,
                                        const char *buf, size_t count)
 {
         struct subsys_private *sp = bus_to_subsys(bus);
@@ -304,7 +304,7 @@ static ssize_t drivers_autoprobe_store(struct bus_type *bus,
         return count;
 }
 
-static ssize_t drivers_probe_store(struct bus_type *bus,
+static ssize_t drivers_probe_store(const struct bus_type *bus,
                                    const char *buf, size_t count)
 {
         struct device *dev;
@@ -769,7 +769,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
  * attached and rescan it against existing drivers to see if it matches
  * any by calling device_attach() for the unbound devices.
  */
-int bus_rescan_devices(struct bus_type *bus)
+int bus_rescan_devices(const struct bus_type *bus)
 {
         return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
 }
@@ -808,7 +808,7 @@ static void klist_devices_put(struct klist_node *n)
         put_device(dev);
 }
 
-static ssize_t bus_uevent_store(struct bus_type *bus,
+static ssize_t bus_uevent_store(const struct bus_type *bus,
                                 const char *buf, size_t count)
 {
         struct subsys_private *sp = bus_to_subsys(bus);
@@ -841,7 +841,7 @@ static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL,
  * infrastructure, then register the children subsystems it has:
  * the devices and drivers that belong to the subsystem.
  */
-int bus_register(struct bus_type *bus)
+int bus_register(const struct bus_type *bus)
 {
         int retval;
         struct subsys_private *priv;
@@ -935,8 +935,8 @@ void bus_unregister(const struct bus_type *bus)
                 return;
 
         pr_debug("bus: '%s': unregistering\n", bus->name);
-        if (bus->dev_root)
-                device_unregister(bus->dev_root);
+        if (sp->dev_root)
+                device_unregister(sp->dev_root);
 
         bus_kobj = &sp->subsys.kobj;
         sysfs_remove_groups(bus_kobj, bus->bus_groups);
@@ -1198,6 +1198,7 @@ static int subsys_register(struct bus_type *subsys,
                            const struct attribute_group **groups,
                            struct kobject *parent_of_root)
 {
+        struct subsys_private *sp;
         struct device *dev;
         int err;
 
@@ -1205,6 +1206,12 @@ static int subsys_register(struct bus_type *subsys,
         if (err < 0)
                 return err;
 
+        sp = bus_to_subsys(subsys);
+        if (!sp) {
+                err = -EINVAL;
+                goto err_sp;
+        }
+
         dev = kzalloc(sizeof(struct device), GFP_KERNEL);
         if (!dev) {
                 err = -ENOMEM;
@@ -1223,7 +1230,8 @@ static int subsys_register(struct bus_type *subsys,
         if (err < 0)
                 goto err_dev_reg;
 
-        subsys->dev_root = dev;
+        sp->dev_root = dev;
+        subsys_put(sp);
         return 0;
 
 err_dev_reg:
@@ -1232,6 +1240,8 @@ err_dev_reg:
 err_name:
         kfree(dev);
 err_dev:
+        subsys_put(sp);
+err_sp:
         bus_unregister(subsys);
         return err;
 }
@@ -1297,7 +1307,7 @@ EXPORT_SYMBOL_GPL(subsys_virtual_register);
  * from being unregistered or unloaded while the caller is using it.
  * The caller is responsible for preventing this.
  */
-struct device_driver *driver_find(const char *name, struct bus_type *bus)
+struct device_driver *driver_find(const char *name, const struct bus_type *bus)
 {
         struct subsys_private *sp = bus_to_subsys(bus);
         struct kobject *k;
@@ -1349,9 +1359,15 @@ bool bus_is_registered(const struct bus_type *bus)
  */
 struct device *bus_get_dev_root(const struct bus_type *bus)
 {
-        if (bus)
-                return get_device(bus->dev_root);
-        return NULL;
+        struct subsys_private *sp = bus_to_subsys(bus);
+        struct device *dev_root;
+
+        if (!sp)
+                return NULL;
+
+        dev_root = get_device(sp->dev_root);
+        subsys_put(sp);
+        return dev_root;
 }
 EXPORT_SYMBOL_GPL(bus_get_dev_root);
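With dev_root moved from struct bus_type into the internal subsys_private, bus_get_dev_root() above becomes the way to reach a bus's root device, and it now returns that device with a reference held. A caller would follow a get/put pattern along these lines (sketch; cpu_subsys is just a familiar example bus):

    struct device *dev_root = bus_get_dev_root(&cpu_subsys);

    if (dev_root) {
            /* e.g. create attributes under /sys/devices/system/cpu */
            put_device(dev_root);
    }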
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f6573c335f4c..bba3482ddeb8 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -14,7 +14,7 @@
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -28,6 +28,9 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
 #define per_cpu_cacheinfo_idx(cpu, idx) \
                                 (per_cpu_cacheinfo(cpu) + (idx))
 
+/* Set if no cache information is found in DT/ACPI. */
+static bool use_arch_info;
+
 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
 {
         return ci_cacheinfo(cpu);
@@ -38,11 +41,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 {
         /*
          * For non DT/ACPI systems, assume unique level 1 caches,
-         * system-wide shared caches for all other levels. This will be used
-         * only if arch specific code has not populated shared_cpu_map
+         * system-wide shared caches for all other levels.
          */
-        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
-                return !(this_leaf->level == 1);
+        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
+            use_arch_info)
+                return (this_leaf->level != 1) && (sib_leaf->level != 1);
 
         if ((sib_leaf->attributes & CACHE_ID) &&
             (this_leaf->attributes & CACHE_ID))
@@ -79,6 +82,9 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
 }
 
 #ifdef CONFIG_OF
+
+static bool of_check_cache_nodes(struct device_node *np);
+
 /* OF properties to query for a given cache type */
 struct cache_type_info {
         const char *size_prop;
@@ -206,6 +212,11 @@ static int cache_setup_of_node(unsigned int cpu)
                 return -ENOENT;
         }
 
+        if (!of_check_cache_nodes(np)) {
+                of_node_put(np);
+                return -ENOENT;
+        }
+
         prev = np;
 
         while (index < cache_leaves(cpu)) {
@@ -230,6 +241,25 @@ static int cache_setup_of_node(unsigned int cpu)
         return 0;
 }
 
+static bool of_check_cache_nodes(struct device_node *np)
+{
+        struct device_node *next;
+
+        if (of_property_present(np, "cache-size") ||
+            of_property_present(np, "i-cache-size") ||
+            of_property_present(np, "d-cache-size") ||
+            of_property_present(np, "cache-unified"))
+                return true;
+
+        next = of_find_next_cache_node(np);
+        if (next) {
+                of_node_put(next);
+                return true;
+        }
+
+        return false;
+}
+
 static int of_count_cache_leaves(struct device_node *np)
 {
         unsigned int leaves = 0;
@@ -261,6 +291,11 @@ int init_of_cache_level(unsigned int cpu)
         struct device_node *prev = NULL;
         unsigned int levels = 0, leaves, level;
 
+        if (!of_check_cache_nodes(np)) {
+                of_node_put(np);
+                return -ENOENT;
+        }
+
         leaves = of_count_cache_leaves(np);
         if (leaves > 0)
                 levels = 1;
@@ -312,6 +347,10 @@ static int cache_setup_properties(unsigned int cpu)
         else if (!acpi_disabled)
                 ret = cache_setup_acpi(cpu);
 
+        // Assume there is no cache information available in DT/ACPI from now.
+        if (ret && use_arch_cache_info())
+                use_arch_info = true;
+
         return ret;
 }
 
@@ -330,7 +369,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
          * to update the shared cpu_map if the cache attributes were
          * populated early before all the cpus are brought online
          */
-        if (!last_level_cache_is_valid(cpu)) {
+        if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
                 ret = cache_setup_properties(cpu);
                 if (ret)
                         return ret;
@@ -398,6 +437,11 @@ static void free_cache_attributes(unsigned int cpu)
         cache_shared_cpu_map_remove(cpu);
 }
 
+int __weak early_cache_level(unsigned int cpu)
+{
+        return -ENOENT;
+}
+
 int __weak init_cache_level(unsigned int cpu)
 {
         return -ENOENT;
@@ -423,63 +467,95 @@ int allocate_cache_info(int cpu)
 
 int fetch_cache_info(unsigned int cpu)
 {
-        struct cpu_cacheinfo *this_cpu_ci;
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
         unsigned int levels = 0, split_levels = 0;
         int ret;
 
         if (acpi_disabled) {
                 ret = init_of_cache_level(cpu);
-                if (ret < 0)
-                        return ret;
         } else {
                 ret = acpi_get_cache_info(cpu, &levels, &split_levels);
-                if (ret < 0)
+                if (!ret) {
+                        this_cpu_ci->num_levels = levels;
+                        /*
+                         * This assumes that:
+                         * - there cannot be any split caches (data/instruction)
+                         *   above a unified cache
+                         * - data/instruction caches come by pair
+                         */
+                        this_cpu_ci->num_leaves = levels + split_levels;
+                }
+        }
+
+        if (ret || !cache_leaves(cpu)) {
+                ret = early_cache_level(cpu);
+                if (ret)
                         return ret;
 
-                this_cpu_ci = get_cpu_cacheinfo(cpu);
-                this_cpu_ci->num_levels = levels;
-                /*
-                 * This assumes that:
-                 * - there cannot be any split caches (data/instruction)
-                 *   above a unified cache
-                 * - data/instruction caches come by pair
-                 */
-                this_cpu_ci->num_leaves = levels + split_levels;
+                if (!cache_leaves(cpu))
+                        return -ENOENT;
+
+                this_cpu_ci->early_ci_levels = true;
         }
 
-        if (!cache_leaves(cpu))
-                return -ENOENT;
-
         return allocate_cache_info(cpu);
 }
 
-int detect_cache_attributes(unsigned int cpu)
+static inline int init_level_allocate_ci(unsigned int cpu)
 {
-        int ret;
+        unsigned int early_leaves = cache_leaves(cpu);
 
         /* Since early initialization/allocation of the cacheinfo is allowed
          * via fetch_cache_info() and this also gets called as CPU hotplug
          * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
          * as it will happen only once (the cacheinfo memory is never freed).
-         * Just populate the cacheinfo.
+         * Just populate the cacheinfo. However, if the cacheinfo has been
+         * allocated early through the arch-specific early_cache_level() call,
+         * there is a chance the info is wrong (this can happen on arm64). In
+         * that case, call init_cache_level() anyway to give the arch-specific
+         * code a chance to make things right.
          */
-        if (per_cpu_cacheinfo(cpu))
-                goto populate_leaves;
+        if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
+                return 0;
 
         if (init_cache_level(cpu) || !cache_leaves(cpu))
                 return -ENOENT;
 
-        ret = allocate_cache_info(cpu);
+        /*
+         * Now that we have properly initialized the cache level info, make
+         * sure we don't try to do that again the next time we are called
+         * (e.g. as CPU hotplug callbacks).
+         */
+        ci_cacheinfo(cpu)->early_ci_levels = false;
+
+        if (cache_leaves(cpu) <= early_leaves)
+                return 0;
+
+        kfree(per_cpu_cacheinfo(cpu));
+        return allocate_cache_info(cpu);
+}
+
+int detect_cache_attributes(unsigned int cpu)
+{
+        int ret;
+
+        ret = init_level_allocate_ci(cpu);
         if (ret)
                 return ret;
 
-populate_leaves:
         /*
-         * populate_cache_leaves() may completely setup the cache leaves and
-         * shared_cpu_map or it may leave it partially setup.
+         * If LLC is valid the cache leaves were already populated so just go to
+         * update the cpu map.
          */
-        ret = populate_cache_leaves(cpu);
-        if (ret)
-                goto free_ci;
+        if (!last_level_cache_is_valid(cpu)) {
+                /*
+                 * populate_cache_leaves() may completely setup the cache leaves and
+                 * shared_cpu_map or it may leave it partially setup.
+                 */
+                ret = populate_cache_leaves(cpu);
+                if (ret)
+                        goto free_ci;
+        }
 
         /*
          * For systems using DT for cache hierarchy, fw_token
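early_cache_level() above is a new __weak hook an architecture can override to supply a provisional level/leaf count before DT/ACPI information is available; fetch_cache_info() flags the result with early_ci_levels so init_level_allocate_ci() can correct it later. An override would look roughly like this (hypothetical values, not the real arm64 implementation):

    int early_cache_level(unsigned int cpu)
    {
            struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

            /* guess a single unified cache until firmware tables are parsed */
            this_cpu_ci->num_levels = 1;
            this_cpu_ci->num_leaves = 1;
            return 0;
    }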
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 2373b3e210d8..ac1808d1a2e8 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -20,8 +20,52 @@
 #include <linux/mutex.h>
 #include "base.h"
 
+/* /sys/class */
+static struct kset *class_kset;
+
 #define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
 
+/**
+ * class_to_subsys - Turn a struct class into a struct subsys_private
+ *
+ * @class: pointer to the struct bus_type to look up
+ *
+ * The driver core internals need to work on the subsys_private structure, not
+ * the external struct class pointer.  This function walks the list of
+ * registered classes in the system and finds the matching one and returns the
+ * internal struct subsys_private that relates to that class.
+ *
+ * Note, the reference count of the return value is INCREMENTED if it is not
+ * NULL.  A call to subsys_put() must be done when finished with the pointer in
+ * order for it to be properly freed.
+ */
+struct subsys_private *class_to_subsys(const struct class *class)
+{
+        struct subsys_private *sp = NULL;
+        struct kobject *kobj;
+
+        if (!class || !class_kset)
+                return NULL;
+
+        spin_lock(&class_kset->list_lock);
+
+        if (list_empty(&class_kset->list))
+                goto done;
+
+        list_for_each_entry(kobj, &class_kset->list, entry) {
+                struct kset *kset = container_of(kobj, struct kset, kobj);
+
+                sp = container_of_const(kset, struct subsys_private, subsys);
+                if (sp->class == class)
+                        goto done;
+        }
+        sp = NULL;
+done:
+        sp = subsys_get(sp);
+        spin_unlock(&class_kset->list_lock);
+        return sp;
+}
+
 static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
 {
@@ -49,25 +93,24 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
 static void class_release(struct kobject *kobj)
 {
         struct subsys_private *cp = to_subsys_private(kobj);
-        struct class *class = cp->class;
+        const struct class *class = cp->class;
 
         pr_debug("class '%s': release.\n", class->name);
 
-        class->p = NULL;
-
         if (class->class_release)
                 class->class_release(class);
         else
                 pr_debug("class '%s' does not have a release() function, "
                          "be careful\n", class->name);
 
+        lockdep_unregister_key(&cp->lock_key);
         kfree(cp);
 }
 
 static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
 {
         const struct subsys_private *cp = to_subsys_private(kobj);
-        struct class *class = cp->class;
+        const struct class *class = cp->class;
 
         return class->ns_type;
 }
@@ -83,44 +126,34 @@ static const struct kobj_type class_ktype = {
         .child_ns_type = class_child_ns_type,
 };
 
-/* Hotplug events for classes go to the class subsys */
-static struct kset *class_kset;
-
-
-int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,
                          const void *ns)
 {
+        struct subsys_private *sp = class_to_subsys(cls);
         int error;
 
-        if (cls)
-                error = sysfs_create_file_ns(&cls->p->subsys.kobj,
-                                             &attr->attr, ns);
-        else
-                error = -EINVAL;
+        if (!sp)
+                return -EINVAL;
+
+        error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns);
+        subsys_put(sp);
+
         return error;
 }
 EXPORT_SYMBOL_GPL(class_create_file_ns);
 
-void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,
                           const void *ns)
 {
-        if (cls)
-                sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
-}
-EXPORT_SYMBOL_GPL(class_remove_file_ns);
-
-static struct class *class_get(struct class *cls)
-{
-        if (cls)
-                kset_get(&cls->p->subsys);
-        return cls;
-}
+        struct subsys_private *sp = class_to_subsys(cls);
 
-static void class_put(struct class *cls)
-{
-        if (cls)
-                kset_put(&cls->p->subsys);
+        if (!sp)
+                return;
+
+        sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns);
+        subsys_put(sp);
 }
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
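Every call site converted in this file follows the lookup/put discipline that the class_to_subsys() kerneldoc spells out; the shape, as seen in class_create_file_ns() above, is:

    struct subsys_private *sp = class_to_subsys(cls);

    if (!sp)
            return -EINVAL;
    /* ... operate on sp->subsys.kobj ... */
    subsys_put(sp);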