author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2015-06-19 01:17:50 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2015-06-19 01:17:50 +0200
commit     8ced6789da03649d21656dd908c2930a003fffc4
tree       63789eea39d470d969bfb78a470b703827850338
parent     4a3004e5e651baf15f0452da9200dd5c3bda5907
parent     07949bf9c63c9a80027fe8452d5fe8b9ba9b3c23
Merge branch 'pm-cpufreq'
* pm-cpufreq: (37 commits)
  cpufreq: dt: allow driver to boot automatically
  intel_pstate: Fix overflow in busy_scaled due to long delay
  cpufreq: qoriq: optimize the CPU frequency switching time
  cpufreq: gx-suspmod: Fix two typos in two comments
  cpufreq: nforce2: Fix typo in comment to function nforce2_init()
  cpufreq: governor: Serialize governor callbacks
  cpufreq: governor: split cpufreq_governor_dbs()
  cpufreq: governor: register notifier from cs_init()
  cpufreq: Remove cpufreq_update_policy()
  cpufreq: Restart governor as soon as possible
  cpufreq: Call cpufreq_policy_put_kobj() from cpufreq_policy_free()
  cpufreq: Initialize policy->kobj while allocating policy
  cpufreq: Stop migrating sysfs files on hotplug
  cpufreq: Don't allow updating inactive policies from sysfs
  intel_pstate: Force setting target pstate when required
  intel_pstate: change some inconsistent debug information
  cpufreq: Track cpu managing sysfs kobjects separately
  cpufreq: Fix for typos in two comments
  cpufreq: Mark policy->governor = NULL for inactive policies
  cpufreq: Manage governor usage history with 'policy->last_governor'
  ...
 Documentation/cpu-freq/user-guide.txt  |   2
 drivers/cpufreq/Kconfig.arm            |   2
 drivers/cpufreq/arm_big_little.c       |  40
 drivers/cpufreq/cpufreq-dt.c           |   1
 drivers/cpufreq/cpufreq-nforce2.c      |   2
 drivers/cpufreq/cpufreq.c              | 573
 drivers/cpufreq/cpufreq_conservative.c |  28
 drivers/cpufreq/cpufreq_governor.c     | 345
 drivers/cpufreq/cpufreq_governor.h     |  16
 drivers/cpufreq/cpufreq_ondemand.c     |   6
 drivers/cpufreq/gx-suspmod.c           |   4
 drivers/cpufreq/intel_pstate.c         |  72
 drivers/cpufreq/pxa2xx-cpufreq.c       |  20
 drivers/cpufreq/qoriq-cpufreq.c        |  32
 include/linux/cpufreq.h                |   5
 include/trace/events/power.h           |  25
 16 files changed, 670 insertions(+), 503 deletions(-)
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index ff2f28332cc4..109e97bbab77 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -196,8 +196,6 @@ affected_cpus : List of Online CPUs that require software
related_cpus : List of Online + Offline CPUs that need software
coordination of frequency.
-scaling_driver : Hardware driver for cpufreq.
-
scaling_cur_freq : Current frequency of the CPU as determined by
the governor and cpufreq core, in KHz. This is
the frequency the kernel thinks the CPU runs
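Aside: a minimal userspace sketch (not part of this patch; the sysfs path and the choice of cpu0 are illustrative) of reading the scaling_cur_freq attribute documented above:

#include <stdio.h>

int main(void)
{
	unsigned int khz;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &khz) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("cpu0 scaling_cur_freq: %u KHz\n", khz);
	return 0;
}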
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f3dbc8cf729..611cb09239eb 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,7 @@
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index e1a6ba66a7f5..f1e42f8ce0fc 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
-#include <asm/bL_switcher.h>
#include "arm_big_little.h"
@@ -41,12 +40,16 @@
#define MAX_CLUSTERS 2
#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled() bL_switching_enabled
#define set_switching_enabled(x) (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled() false
#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
#endif
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
@@ -186,6 +189,15 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
mutex_unlock(&cluster_lock[old_cluster]);
}
+ /*
+ * FIXME: clk_set_rate has to handle the case where clk_change_rate
+ * can fail due to hardware or firmware issues. Until the clk core
+ * layer is fixed, we can check here. In most of the cases we will
+ * be reading only the cached value anyway. This needs to be removed
+ * once clk core is fixed.
+ */
+ if (bL_cpufreq_get_rate(cpu) != new_rate)
+ return -EIO;
return 0;
}
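Aside: the FIXME above exists because clk_set_rate() can report success even when the underlying rate change failed. A condensed sketch of the read-back check it describes (the function name is illustrative, not the driver's):

#include <linux/clk.h>
#include <linux/errno.h>

static int set_rate_checked(struct clk *c, unsigned long new_rate)
{
	int ret = clk_set_rate(c, new_rate);

	if (ret)
		return ret;

	/* Usually just reads the cached rate, but catches silent failures */
	if (clk_get_rate(c) != new_rate)
		return -EIO;

	return 0;
}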
@@ -322,7 +334,6 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
- char name[14] = "cpu-cluster.";
int ret;
if (freq_table[cluster])
@@ -342,8 +353,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
goto free_opp_table;
}
- name[12] = cluster + '0';
- clk[cluster] = clk_get(cpu_dev, name);
+ clk[cluster] = clk_get(cpu_dev, NULL);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
@@ -506,6 +516,7 @@ static struct cpufreq_driver bL_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
+#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
unsigned long action, void *_arg)
{
@@ -538,6 +549,20 @@ static struct notifier_block bL_switcher_notifier = {
.notifier_call = bL_cpufreq_switcher_notifier,
};
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
int ret, i;
@@ -555,8 +580,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
- ret = bL_switcher_get_enabled();
- set_switching_enabled(ret);
+ set_switching_enabled(bL_switcher_get_enabled());
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
@@ -567,7 +591,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ ret = __bLs_register_notifier();
if (ret) {
cpufreq_unregister_driver(&bL_cpufreq_driver);
arm_bL_ops = NULL;
@@ -591,7 +615,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
}
bL_switcher_get_enabled();
- bL_switcher_unregister_notifier(&bL_switcher_notifier);
+ __bLs_unregister_notifier();
cpufreq_unregister_driver(&bL_cpufreq_driver);
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
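Aside: the CONFIG_BL_SWITCHER changes above follow a common kernel idiom: compile the real helpers only when the option is enabled and provide no-op stubs otherwise, so callers such as bL_cpufreq_register() need no #ifdefs of their own. A generic, hypothetical sketch (CONFIG_FOO and the foo_* names are placeholders):

#ifdef CONFIG_FOO
static int __foo_register_notifier(void)
{
	return foo_register_notifier(&foo_notifier);	/* real work when enabled */
}
#else
static inline int __foo_register_notifier(void) { return 0; }	/* no-op stub */
#endif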
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index bab67db54b7e..528a82bf5038 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -416,6 +416,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
};
module_platform_driver(dt_cpufreq_platdrv);
+MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
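Aside: background on the MODULE_ALIAS() line added above. When a platform device named "cpufreq-dt" is registered, the driver core emits a uevent carrying MODALIAS=platform:cpufreq-dt; udev/kmod then loads any module declaring a matching alias, which is what lets the driver "boot automatically". A minimal, hypothetical module skeleton showing the pieces involved:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver = {
	.driver = { .name = "cpufreq-dt" },	/* matched against the device name */
};
module_platform_driver(demo_driver);

MODULE_ALIAS("platform:cpufreq-dt");	/* matches the MODALIAS uevent */
MODULE_LICENSE("GPL");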
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a2258090b58b..db69eeb501a7 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -414,7 +414,7 @@ static int nforce2_detect_chipset(void)
* nforce2_init - initializes the nForce2 CPUFreq driver
*
* Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
- * devices, -EINVAL on problems during initiatization, and zero on
+ * devices, -EINVAL on problems during initialization, and zero on
* success.
*/
static int __init nforce2_init(void)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8ae655c364f4..b612411655f9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -31,10 +31,62 @@
#include <linux/tick.h>
#include <trace/events/power.h>
-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy) \
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+ return active == !policy_is_inactive(policy);
+}
+
+/* Finds next active/inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+ bool active)
+{
+ do {
+ policy = list_next_entry(policy, policy_list);
+
+ /* No more policies in the list */
+ if (&policy->policy_list == &cpufreq_policy_list)
+ return NULL;
+ } while (!suitable_policy(policy, active));
+
+ return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+ struct cpufreq_policy *policy;
+
+ /* No policies in the list */
+ if (list_empty(&cpufreq_policy_list))
+ return NULL;
+
+ policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+ policy_list);
+
+ if (!suitable_policy(policy, active))
+ policy = next_policy(policy, active);
+
+ return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active) \
+ for (__policy = first_policy(__active); \
+ __policy; \
+ __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy) \
+ for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy) \
+ for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
/* Iterate over governors */
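Aside: a usage sketch (illustrative only) of the iterators introduced above; a policy counts as active while policy->cpus, the mask of online CPUs it manages, is non-empty:

struct cpufreq_policy *policy;

for_each_active_policy(policy)
	pr_debug("CPU%u: policy has online CPUs\n", policy->cpu);

for_each_inactive_policy(policy)
	pr_debug("CPU%u: policy kept for offline CPUs\n", policy->cpu);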
@@ -49,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list);
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
-/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
@@ -178,7 +226,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
policy->cpuinfo.transition_latency = transition_latency;
/*
- * The driver only supports the SMP configuartion where all processors
+ * The driver only supports the SMP configuration where all processors
 * share the clock and voltage.
*/
cpumask_setall(policy->cpus);
@@ -187,10 +235,18 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
-unsigned int cpufreq_generic_get(unsigned int cpu)
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+
if (!policy || IS_ERR(policy->clk)) {
pr_err("%s: No %s associated to cpu: %d\n",
__func__, policy ? "clk" : "policy", cpu);
@@ -201,18 +257,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
-/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
-{
- return per_cpu(cpufreq_cpu_data, cpu);
-}
-
+/**
+ * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ *
+ * @cpu: cpu to find policy for.
+ *
+ * This returns the policy for 'cpu', or NULL if it doesn't exist.
+ * It also increments the kobject reference count to mark the policy busy, so a
+ * corresponding call to cpufreq_cpu_put() is required to decrement it again.
+ * If that cpufreq_cpu_put() call isn't made, the policy won't be freed, as
+ * freeing depends on the kobj count.
+ *
+ * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
+ * valid policy is found. This is done to make sure the driver doesn't get
+ * unregistered while the policy is being used.
+ *
+ * Return: A valid policy on success, otherwise NULL on failure.
+ */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
unsigned long flags;
- if (cpu >= nr_cpu_ids)
+ if (WARN_ON(cpu >= nr_cpu_ids))
return NULL;
if (!down_read_trylock(&cpufreq_rwsem))
@@ -223,7 +290,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (cpufreq_driver) {
/* get the CPU */
- policy = per_cpu(cpufreq_cpu_data, cpu);
+ policy = cpufreq_cpu_get_raw(cpu);
if (policy)
kobject_get(&policy->kobj);
}
@@ -237,6 +304,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+/**
+ * cpufreq_cpu_put: Decrements the usage count of a policy
+ *
+ * @policy: policy earlier returned by cpufreq_cpu_get().
+ *
+ * This decrements the kobject reference count incremented earlier by calling
+ * cpufreq_cpu_get().
+ *
+ * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
+ */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
kobject_put(&policy->kobj);
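Aside: the caller pattern the kerneldoc above describes, as a sketch. Every successful cpufreq_cpu_get() must be paired with cpufreq_cpu_put(), which drops both the kobject reference and the cpufreq_rwsem read lock:

struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

if (policy) {
	pr_info("CPU%u limits: %u-%u kHz\n", cpu, policy->min, policy->max);
	cpufreq_cpu_put(policy);
}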
@@ -798,11 +875,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
down_write(&policy->rwsem);
+ /* Updating inactive policies is invalid, so avoid doing that. */
+ if (unlikely(policy_is_inactive(policy))) {
+ ret = -EBUSY;
+ goto unlock_policy_rwsem;
+ }
+
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
+unlock_policy_rwsem:
up_write(&policy->rwsem);
up_read(&cpufreq_rwsem);
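Aside: the effect of the new check, seen from userspace (a sketch; with this change, a write to a sysfs attribute of an inactive policy is rejected with EBUSY):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor", "w");

	if (!f)
		return 1;
	/* The error typically surfaces on the flush at fclose(); errno is
	 * EBUSY when all of the policy's CPUs are offline. */
	if (fprintf(f, "ondemand\n") < 0 || fclose(f) == EOF)
		perror("write");
	return 0;
}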
@@ -873,28 +957,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
-/* symlink affected CPUs */
+static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+ struct device *cpu_dev;
+
+ pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
+
+ if (!policy)
+ return 0;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (WARN_ON(!cpu_dev))
+ return 0;
+
+ return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+ struct device *cpu_dev;
+
+ pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
+
+ cpu_dev = get_cpu_device(cpu);
+ if (WARN_ON(!cpu_dev))
+ return;
+
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+}
+
+/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
- for_each_cpu(j, policy->cpus) {
- struct device *cpu_dev;
-
- if (j == policy->cpu)
+ /* Some related CPUs might not be present (physically hotplugged) */
+ for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ if (j == policy->kobj_cpu)
continue;
- pr_debug("Adding link for CPU: %u\n", j);
- cpu_dev = get_cpu_device(j);
- ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq");
+ ret = add_cpu_dev_symlink(policy, j);
if (ret)
break;
}
+
return ret;
}
+static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
+{
+ unsigned int j;
+
+ /* Some related CPUs might not be present (physically hotplugged) */
+ for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ if (j == policy->kobj_cpu)
+ continue;
+
+ remove_cpu_dev_symlink(policy, j);
+ }
+}
+
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
struct device *dev)
{
@@ -937,7 +1060,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
memcpy(&new_policy, policy, sizeof(*policy));
/* Update governor of new_policy to the governor used before hotplug */
- gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ gov = find_governor(policy->last_governor);
if (gov)
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
@@ -963,7 +1086,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev)
{
int ret = 0;
- unsigned long flags;
+
+ /* Has this CPU been taken care of already? */
+ if (cpumask_test_cpu(cpu, policy->cpus))
+ return 0;
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
@@ -974,13 +1100,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
down_write(&policy->rwsem);
-
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
cpumask_set_cpu(cpu, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
up_write(&policy->rwsem);
if (has_target()) {
@@ -994,7 +1114,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ return 0;
}
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
@@ -1003,20 +1123,25 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
-
- policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
-
+ policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (policy)
- policy->governor = NULL;
+ if (likely(policy)) {
+ /* Policy should be inactive here */
+ WARN_ON(!policy_is_inactive(policy));
+
+ down_write(&policy->rwsem);
+ policy->cpu = cpu;
+ up_write(&policy->rwsem);
+ }
return policy;
}
-static struct cpufreq_policy *cpufreq_policy_alloc(void)
+static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
struct cpufreq_policy *policy;
+ int ret;
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy)
@@ -1028,6 +1153,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
+ "cpufreq");
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ goto err_free_rcpumask;
+ }
+
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
@@ -1035,8 +1167,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
+ policy->cpu = dev->id;
+
+ /* Set this once on allocation */
+ policy->kobj_cpu = dev->id;
+
return policy;
+err_free_rcpumask:
+ free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
err_free_policy:
@@ -1045,18 +1184,20 @@ err_free_policy:
return NULL;
}
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
struct kobject *kobj;
struct completion *cmp;
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_REMOVE_POLICY, policy);
+ if (notify)
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
- down_read(&policy->rwsem);
+ down_write(&policy->rwsem);
+ cpufreq_remove_dev_symlink(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
+ up_write(&policy->rwsem);
kobject_put(kobj);
/*
@@ -1069,68 +1210,64 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
pr_debug("wait complete\n");
}
-static void cpufreq_policy_free(struct cpufreq_policy *policy)
-{
- free_cpumask_var(policy->related_cpus);
- free_cpumask_var(policy->cpus);
- kfree(policy);
-}
-
-static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
- struct device *cpu_dev)
+static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
- int ret;
-
- if (WARN_ON(cpu == policy->cpu))
- return 0;
+ unsigned long flags;
+ int cpu;
- /* Move kobject to the new policy->cpu */
- ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
- if (ret) {
- pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
- return ret;
- }
+ /* Remove policy from list */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- up_write(&policy->rwsem);
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return 0;
+ cpufreq_policy_put_kobj(policy, notify);
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
}
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- bool recover_policy = cpufreq_suspended;
-
- if (cpu_is_offline(cpu))
- return 0;
+ bool recover_policy = !sif;
pr_debug("adding CPU %u\n", cpu);
- /* check whether a different CPU already registered this
- * CPU because it is in the same boat. */
- policy = cpufreq_cpu_get_raw(cpu);
- if (unlikely(policy))
- return 0;
+ /*
+ * Only possible if 'cpu' wasn't physically present earlier and we are
+ * here from subsys_interface add callback. A hotplug notifier will
+ * follow and we will handle it like logical CPU hotplug then. For now,
+ * just create the sysfs link.
+ */
+ if (cpu_is_offline(cpu))
+ return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
- /* Check if this cpu was hot-unplugged earlier and has siblings */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_policy(policy) {
- if (cpumask_test_cpu(cpu, policy->related_cpus)) {
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(policy, cpu, dev);
- up_read(&cpufreq_rwsem);
- return ret;
- }
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy && !policy_is_inactive(policy)) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+ up_read(&cpufreq_rwsem);
+ return ret;
}
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/*
* Restore the saved policy when doing light-weight init and fall back
@@ -1139,22 +1276,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
recover_policy = false;
- policy = cpufreq_policy_alloc();
+ policy = cpufreq_policy_alloc(dev);
if (!policy)
goto nomem_out;
}
- /*
- * In the resume path, since we restore a saved policy, the assignment
- * to policy->cpu is like an update of the existing policy, rather than
- * the creation of a brand new one. So we need to perform this update
- * by invoking update_policy_cpu().
- */
- if (recover_policy && cpu != policy->cpu)
- WARN_ON(update_policy_cpu(policy, cpu, dev));
- else
- policy->cpu = cpu;
-
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* call driver. From then on the cpufreq must be able
@@ -1181,21 +1307,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
- /* prepare interface data */
- ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &dev->kobj, "cpufreq");
- if (ret) {
- pr_err("%s: failed to init policy->kobj: %d\n",
- __func__, ret);
- goto err_init_policy_kobj;
- }
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_cpu_data, j) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
@@ -1253,11 +1370,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_out_unregister;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
- }
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- list_add(&policy->policy_list, &cpufreq_policy_list);
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ }
cpufreq_init_policy(policy);
@@ -1281,68 +1398,28 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
err_out_unregister:
err_get_freq:
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_cpu_data, j) = NULL;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- if (!recover_policy) {
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
- }
-err_init_policy_kobj:
up_write(&policy->rwsem);
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
- if (recover_policy) {
- /* Do not leave stale fallback data behind. */
- per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
- cpufreq_policy_put_kobj(policy);
- }
- cpufreq_policy_free(policy);
-
+ cpufreq_policy_free(policy, recover_policy);
nomem_out:
up_read(&cpufreq_rwsem);
return ret;
}
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
-{
- return __cpufreq_add_dev(dev, sif);
-}
-
static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif)
{
- unsigned int cpu = dev->id, cpus;
- int ret;
- unsigned long flags;
+ unsigned int cpu = dev->id;
+ int ret = 0;
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
- policy = per_cpu(cpufreq_cpu_data, cpu);
-
- /* Save the policy somewhere when doing a light-weight tear-down */
- if (cpufreq_suspended)
- per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
-
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+ policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
@@ -1354,108 +1431,75 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
pr_err("%s: Failed to stop governor\n", __func__);
return ret;
}
-
- strncpy(per_cpu(cpufreq_cpu_governor, cpu),
- policy->governor->name, CPUFREQ_NAME_LEN);
}
- down_read(&policy->rwsem);
- cpus = cpumask_weight(policy->cpus);
- up_read(&policy->rwsem);
+ down_write(&policy->rwsem);
+ cpumask_clear_cpu(cpu, policy->cpus);
- if (cpu != policy->cpu) {
- sysfs_remove_link(&dev->kobj, "cpufreq");
- } else if (cpus > 1) {
+ if (policy_is_inactive(policy)) {
+ if (has_target())
+ strncpy(policy->last_governor, policy->governor->name,
+ CPUFREQ_NAME_LEN);
+ } else if (cpu == policy->cpu) {
/* Nominate new CPU */
- int new_cpu = cpumask_any_but(policy->cpus, cpu);
- struct device *cpu_dev = get_cpu_device(new_cpu);
+ policy->cpu = cpumask_any(policy->cpus);
+ }
+ up_write(&policy->rwsem);
- sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
- ret = update_policy_cpu(policy, new_cpu, cpu_dev);
- if (ret) {
- if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq"))
- pr_err("%s: Failed to restore kobj link to cpu:%d\n",
- __func__, cpu_dev->id);
- return ret;
- }
+ /* Start governor again for active policy */
+ if (!policy_is_inactive(policy)) {
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
- if (!cpufreq_suspended)
- pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
+ if (ret)
+ pr_err("%s: Failed to start governor\n", __func__);
+ }
} else if (cpufreq_driver->stop_cpu) {
cpufreq_driver->stop_cpu(policy);
}
- return 0;
+ return ret;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
struct subsys_interface *sif)
{
- unsigned int cpu = dev->id, cpus;
+ unsigned int cpu = dev->id;
int ret;
- unsigned long flags;
- struct cpufreq_policy *policy;
-
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, cpu);
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- down_write(&policy->rwsem);
- cpus = cpumask_weight(policy->cpus);
-
- if (cpus > 1)
- cpumask_clear_cpu(cpu, policy->cpus);
- up_write(&policy->rwsem);
+ /* Only proceed for inactive policies */
+ if (!policy_is_inactive(po