Diffstat (limited to 'kernel')
33 files changed, 990 insertions, 567 deletions
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 24b5c2ab5598..feb59380c896 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -310,6 +310,9 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
 	dst_bstat->cputime.utime += src_bstat->cputime.utime;
 	dst_bstat->cputime.stime += src_bstat->cputime.stime;
 	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
+#endif
 }
 
 static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
@@ -318,6 +321,9 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
 	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
 	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
 	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
+#endif
 }
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
@@ -398,6 +404,11 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
 	case CPUTIME_SOFTIRQ:
 		rstatc->bstat.cputime.stime += delta_exec;
 		break;
+#ifdef CONFIG_SCHED_CORE
+	case CPUTIME_FORCEIDLE:
+		rstatc->bstat.forceidle_sum += delta_exec;
+		break;
+#endif
 	default:
 		break;
 	}
@@ -411,8 +422,9 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
  * with how it is done by __cgroup_account_cputime_field for each bit of
  * cpu time attributed to a cgroup.
  */
-static void root_cgroup_cputime(struct task_cputime *cputime)
+static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
 {
+	struct task_cputime *cputime = &bstat->cputime;
 	int i;
 
 	cputime->stime = 0;
@@ -438,6 +450,10 @@ static void root_cgroup_cputime(struct task_cputime *cputime)
 		cputime->sum_exec_runtime += user;
 		cputime->sum_exec_runtime += sys;
 		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+
+#ifdef CONFIG_SCHED_CORE
+		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+#endif
 	}
 }
 
@@ -445,27 +461,43 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
 	u64 usage, utime, stime;
-	struct task_cputime cputime;
+	struct cgroup_base_stat bstat;
+#ifdef CONFIG_SCHED_CORE
+	u64 forceidle_time;
+#endif
 
 	if (cgroup_parent(cgrp)) {
 		cgroup_rstat_flush_hold(cgrp);
 		usage = cgrp->bstat.cputime.sum_exec_runtime;
 		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
 			       &utime, &stime);
+#ifdef CONFIG_SCHED_CORE
+		forceidle_time = cgrp->bstat.forceidle_sum;
+#endif
 		cgroup_rstat_flush_release();
 	} else {
-		root_cgroup_cputime(&cputime);
-		usage = cputime.sum_exec_runtime;
-		utime = cputime.utime;
-		stime = cputime.stime;
+		root_cgroup_cputime(&bstat);
+		usage = bstat.cputime.sum_exec_runtime;
+		utime = bstat.cputime.utime;
+		stime = bstat.cputime.stime;
+#ifdef CONFIG_SCHED_CORE
+		forceidle_time = bstat.forceidle_sum;
+#endif
 	}
 
 	do_div(usage, NSEC_PER_USEC);
 	do_div(utime, NSEC_PER_USEC);
 	do_div(stime, NSEC_PER_USEC);
+#ifdef CONFIG_SCHED_CORE
+	do_div(forceidle_time, NSEC_PER_USEC);
+#endif
 
 	seq_printf(seq, "usage_usec %llu\n"
 		   "user_usec %llu\n"
 		   "system_usec %llu\n",
		   usage, utime, stime);
+
+#ifdef CONFIG_SCHED_CORE
+	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
+#endif
 }
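[Editor's note] The rstat change above exposes the core scheduler's forced-idle time as a new core_sched.force_idle_usec field in each cgroup's cpu.stat (only when CONFIG_SCHED_CORE is enabled). A minimal user-space sketch of reading it; the cgroup path is a made-up example:

/* Sketch: print the new core_sched.force_idle_usec line from cpu.stat.
 * Assumes cgroup v2 mounted at /sys/fs/cgroup and a hypothetical group. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.stat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* field is absent on !CONFIG_SCHED_CORE kernels */
		if (!strncmp(line, "core_sched.force_idle_usec", 26))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}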
diff --git a/kernel/configs/x86_debug.config b/kernel/configs/x86_debug.config
index dcd86f32f4ed..6fac5b405334 100644
--- a/kernel/configs/x86_debug.config
+++ b/kernel/configs/x86_debug.config
@@ -7,12 +7,11 @@ CONFIG_DEBUG_SLAB=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_ON=y
-CONFIG_KMEMCHECK=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 CONFIG_GCOV_KERNEL=y
 CONFIG_LOCKDEP=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_SCHEDSTATS=y
-CONFIG_VMLINUX_VALIDATION=y
+CONFIG_NOINSTR_VALIDATION=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d2b354991bf5..c9d32d4d2e20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1819,6 +1819,9 @@ static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 	if (event->attr.read_format & PERF_FORMAT_ID)
 		entry += sizeof(u64);
 
+	if (event->attr.read_format & PERF_FORMAT_LOST)
+		entry += sizeof(u64);
+
 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
 		nr += nr_siblings;
 		size += sizeof(u64);
@@ -5260,11 +5263,15 @@ static int __perf_read_group_add(struct perf_event *leader,
 	values[n++] += perf_event_count(leader);
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
+	if (read_format & PERF_FORMAT_LOST)
+		values[n++] = atomic64_read(&leader->lost_samples);
 
 	for_each_sibling_event(sub, leader) {
 		values[n++] += perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
+		if (read_format & PERF_FORMAT_LOST)
+			values[n++] = atomic64_read(&sub->lost_samples);
 	}
 
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -5321,7 +5328,7 @@ static int perf_read_one(struct perf_event *event,
 				 u64 read_format, char __user *buf)
 {
 	u64 enabled, running;
-	u64 values[4];
+	u64 values[5];
 	int n = 0;
 
 	values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5331,6 +5338,8 @@ static int perf_read_one(struct perf_event *event,
 		values[n++] = running;
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
+	if (read_format & PERF_FORMAT_LOST)
+		values[n++] = atomic64_read(&event->lost_samples);
 
 	if (copy_to_user(buf, values, n * sizeof(u64)))
 		return -EFAULT;
@@ -6858,7 +6867,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 				 u64 enabled, u64 running)
 {
 	u64 read_format = event->attr.read_format;
-	u64 values[4];
+	u64 values[5];
 	int n = 0;
 
 	values[n++] = perf_event_count(event);
@@ -6872,6 +6881,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 	}
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
+	if (read_format & PERF_FORMAT_LOST)
+		values[n++] = atomic64_read(&event->lost_samples);
 
 	__output_copy(handle, values, n * sizeof(u64));
 }
@@ -6882,7 +6893,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 {
 	struct perf_event *leader = event->group_leader, *sub;
 	u64 read_format = event->attr.read_format;
-	u64 values[5];
+	u64 values[6];
 	int n = 0;
 
 	values[n++] = 1 + leader->nr_siblings;
@@ -6900,6 +6911,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 	values[n++] = perf_event_count(leader);
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
+	if (read_format & PERF_FORMAT_LOST)
+		values[n++] = atomic64_read(&leader->lost_samples);
 
 	__output_copy(handle, values, n * sizeof(u64));
 
@@ -6913,6 +6926,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 		values[n++] = perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
+		if (read_format & PERF_FORMAT_LOST)
+			values[n++] = atomic64_read(&sub->lost_samples);
 
 		__output_copy(handle, values, n * sizeof(u64));
 	}
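[Editor's note] With PERF_FORMAT_LOST set in attr.read_format, a read() on a perf event fd returns one extra u64, ordered after the PERF_FORMAT_ID value exactly as perf_read_one() above fills it in. A hedged sketch of the resulting layout for a non-group event with all format bits requested; the struct name is illustrative, not a uapi definition:

/* Field order follows perf_read_one(): value, enabled, running, id, lost. */
#include <stdint.h>

struct read_format_all {
	uint64_t value;		/* counter value */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* PERF_FORMAT_ID */
	uint64_t lost;		/* PERF_FORMAT_LOST: samples the ring buffer dropped */
};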
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index fb35b926024c..726132039c38 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -172,8 +172,10 @@ __perf_output_begin(struct perf_output_handle *handle,
 		goto out;
 
 	if (unlikely(rb->paused)) {
-		if (rb->nr_pages)
+		if (rb->nr_pages) {
 			local_inc(&rb->lost);
+			atomic64_inc(&event->lost_samples);
+		}
 		goto out;
 	}
 
@@ -254,6 +256,7 @@ __perf_output_begin(struct perf_output_handle *handle,
 
 fail:
 	local_inc(&rb->lost);
+	atomic64_inc(&event->lost_samples);
 	perf_output_put_handle(handle);
 out:
 	rcu_read_unlock();
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 10929eda9825..db3d174c53d4 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -24,6 +24,7 @@ config GENERIC_IRQ_SHOW_LEVEL
 
 # Supports effective affinity mask
 config GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	depends on SMP
 	bool
 
 # Support for delayed migration from interrupt context
@@ -82,6 +83,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool
+	depends on SMP
 	select IRQ_DOMAIN_HIERARCHY
 
 # Generic MSI interrupt support
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 886789dcee43..8ac37e8e738a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -188,7 +188,8 @@ enum {
 
 #ifdef CONFIG_SMP
 static int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 
@@ -224,7 +225,8 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 }
 #else
 static __always_inline int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	return IRQ_STARTUP_NORMAL;
 }
@@ -252,7 +254,7 @@ static int __irq_startup(struct irq_desc *desc)
 int irq_startup(struct irq_desc *desc, bool resend, bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	struct cpumask *aff = irq_data_get_affinity_mask(d);
+	const struct cpumask *aff = irq_data_get_affinity_mask(d);
 	int ret = 0;
 
 	desc->depth = 0;
@@ -1516,7 +1518,8 @@ int irq_chip_request_resources_parent(struct irq_data *data)
 	if (data->chip->irq_request_resources)
 		return data->chip->irq_request_resources(data);
 
-	return -ENOSYS;
+	/* no error on missing optional irq_chip::irq_request_resources */
+	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index bc8e40cf2b65..bbcaac64038e 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -30,7 +30,7 @@ static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
 static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct cpumask *msk;
+	const struct cpumask *msk;
 
 	msk = irq_data_get_affinity_mask(data);
 	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index f0862eb6b506..c653cd31548d 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -431,7 +431,7 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
 	return 0;
 }
 
-static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
 {
 	struct irq_data *data = irq_domain_get_irq_data(d, virq);
 	struct irq_domain_chip_generic *dgc = d->gc;
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 08ce7da3b57c..bbd945bacef0 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -115,11 +115,11 @@ free_descs:
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 	struct irq_domain *domain;
 	unsigned int nr_irqs;
 
-	if (!irq || !data || !ipimask)
+	if (!irq || !data)
 		return -EINVAL;
 
 	domain = data->domain;
@@ -131,7 +131,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 		return -EINVAL;
 	}
 
-	if (WARN_ON(!cpumask_subset(dest, ipimask)))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
 		/*
 		 * Must be destroying a subset of CPUs to which this IPI
 		 * was set up to target
@@ -162,12 +163,13 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 
-	if (!data || !ipimask || cpu >= nr_cpu_ids)
+	if (!data || cpu >= nr_cpu_ids)
 		return INVALID_HWIRQ;
 
-	if (!cpumask_test_cpu(cpu, ipimask))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
 		return INVALID_HWIRQ;
 
 	/*
@@ -186,7 +188,7 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
 static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
 			   const struct cpumask *dest, unsigned int cpu)
 {
-	struct cpumask *ipimask = irq_data_get_affinity_mask(data);
+	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
 
 	if (!chip || !ipimask)
 		return -EINVAL;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index d323b180b0f3..5db0230aa6b5 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -251,7 +251,7 @@ static ssize_t actions_show(struct kobject *kobj,
 	char *p = "";
 
 	raw_spin_lock_irq(&desc->lock);
-	for (action = desc->action; action != NULL; action = action->next) {
+	for_each_action_of_desc(desc, action) {
 		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", p,
 				 action->name);
 		p = ",";
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5ce96510549..8fe1da9614ee 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -147,7 +147,8 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
 	static atomic_t unknown_domains;
 
 	if (WARN_ON((size && direct_max) ||
-		    (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max)))
+		    (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max) ||
+		    (direct_max && (direct_max != hwirq_max))))
 		return NULL;
 
 	domain = kzalloc_node(struct_size(domain, revmap, size),
@@ -219,7 +220,6 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
 	domain->hwirq_max = hwirq_max;
 
 	if (direct_max) {
-		size = direct_max;
 		domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP;
 	}
 
@@ -650,9 +650,9 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= domain->revmap_size) {
-		pr_err("ERROR: no free irqs available below %i maximum\n",
-		       domain->revmap_size);
+	if (virq >= domain->hwirq_max) {
+		pr_err("ERROR: no free irqs available below %lu maximum\n",
+		       domain->hwirq_max);
 		irq_free_desc(virq);
 		return 0;
 	}
@@ -906,10 +906,12 @@ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
 		return desc;
 
 	if (irq_domain_is_nomap(domain)) {
-		if (hwirq < domain->revmap_size) {
+		if (hwirq < domain->hwirq_max) {
 			data = irq_domain_get_irq_data(domain, hwirq);
 			if (data && data->hwirq == hwirq)
 				desc = irq_data_to_desc(data);
+			if (irq && desc)
+				*irq = hwirq;
 		}
 
 		return desc;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8c396319d5ac..40fe7806cc8c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -205,16 +205,8 @@ static void irq_validate_effective_affinity(struct irq_data *data)
 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 		     chip->name, data->irq);
 }
-
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask)
-{
-	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
-}
 #else
 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask) { }
 #endif
 
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -347,7 +339,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 		return false;
 
 	cpumask_copy(desc->irq_common_data.affinity, mask);
-	irq_init_effective_affinity(data, mask);
+	irq_data_update_effective_affinity(data, mask);
 	irqd_set(data, IRQD_AFFINITY_SET);
 	return true;
 }
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index ca71123a6130..c556bc49d213 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -147,7 +147,6 @@ void suspend_device_irqs(void)
 			synchronize_irq(irq);
 	}
 }
-EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
 static void resume_irq(struct irq_desc *desc)
 {
@@ -259,4 +258,3 @@ void resume_device_irqs(void)
 {
 	resume_irqs(false);
 }
-EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index b156e152d6b4..714ac4c3b556 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -332,17 +332,13 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
 	return 0;
 }
 
-/*
- * Update code which is definitely not currently executing.
- * Architectures which need heavyweight synchronization to modify
- * running code can override this to make the non-live update case
- * cheaper.
- */
-void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
-					    enum jump_label_type type)
+#ifndef arch_jump_label_transform_static
+static void arch_jump_label_transform_static(struct jump_entry *entry,
+					     enum jump_label_type type)
 {
-	arch_jump_label_transform(entry, type);
+	/* nothing to do on most architectures */
 }
+#endif
 
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
@@ -508,7 +504,7 @@ void __init jump_label_init(void)
 
 #ifdef CONFIG_MODULES
 
-static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+enum jump_label_type jump_label_init_type(struct jump_entry *entry)
 {
 	struct static_key *key = jump_entry_key(entry);
 	bool type = static_key_type(key);
@@ -596,31 +592,6 @@ static void __jump_label_mod_update(struct static_key *key)
 	}
 }
 
-/***
- * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
- * @mod: module to patch
- *
- * Allow for run-time selection of the optimal nops. Before the module
- * loads patch these with arch_get_jump_label_nop(), which is specified by
- * the arch specific jump label code.
- */
-void jump_label_apply_nops(struct module *mod)
-{
-	struct jump_entry *iter_start = mod->jump_entries;
-	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-	struct jump_entry *iter;
-
-	/* if the module doesn't have jump label entries, just return */
-	if (iter_start == iter_stop)
-		return;
-
-	for (iter = iter_start; iter < iter_stop; iter++) {
-		/* Only write NOPs for arch_branch_static(). */
-		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
-			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
-	}
-}
-
 static int jump_label_add_module(struct module *mod)
 {
 	struct jump_entry *iter_start = mod->jump_entries;
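[Editor's note] The jump_label change above replaces the __weak default of arch_jump_label_transform_static() with the kernel's #ifndef-override idiom: the generic no-op is compiled in only if an architecture has not already defined a hook of the same name. A standalone, hedged illustration of that idiom with made-up names (not the kernel code itself):

#include <stdio.h>

/* Generic fallback, skipped if a build defines arch_hook beforehand. */
#ifndef arch_hook
static void arch_hook(int entry)
{
	/* nothing to do on most architectures */
	(void)entry;
}
#endif

int main(void)
{
	arch_hook(42);	/* resolves to the no-op default above */
	printf("default no-op hook used\n");
	return 0;
}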
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f06b91ca6482..e2f179491b08 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5238,9 +5238,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 		return 0;
 	}
 
-	lockdep_init_map_waits(lock, name, key, 0,
-			       lock->wait_type_inner,
-			       lock->wait_type_outer);
+	lockdep_init_map_type(lock, name, key, 0,
+			      lock->wait_type_inner,
+			      lock->wait_type_outer,
+			      lock->lock_type);
 	class = register_lock_class(lock, subclass, 0);
 	hlock->class_idx = class - lock_classes;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 9d1db4a54d34..65f0262f635e 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -335,8 +335,6 @@ struct rwsem_waiter {
 	struct task_struct *task;
 	enum rwsem_waiter_type type;
 	unsigned long timeout;
-
-	/* Writer only, not initialized in reader */
 	bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 			 * to give up the lock), request a HANDOFF to
 			 * force the issue.
 			 */
-			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-			    time_after(jiffies, waiter->timeout)) {
-				adjustment -= RWSEM_FLAG_HANDOFF;
-				lockevent_inc(rwsem_rlock_handoff);
+			if (time_after(jiffies, waiter->timeout)) {
+				if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+					adjustment -= RWSEM_FLAG_HANDOFF;
+					lockevent_inc(rwsem_rlock_handoff);
+				}
+				waiter->handoff_set = true;
 			}
 
 			atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 					struct rwsem_waiter *waiter)
 {
-	bool first = rwsem_first_waiter(sem) == waiter;
+	struct rwsem_waiter *first = rwsem_first_waiter(sem);
 	long count, new;
 
 	lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
 		if (has_handoff) {
+			/*
+			 * Honor handoff bit and yield only when the first
+			 * waiter is the one that set it. Otherwise, we
+			 * still try to acquire the rwsem.
+			 */
-			if (!first)
+			if (first->handoff_set && (waiter != first))
 				return false;
 
-			/* First waiter inherits a previously set handoff bit */
-			waiter->handoff_set = true;
+			/*
+			 * First waiter can inherit a previously set handoff
+			 * bit and spin on rwsem if lock acquisition fails.
+			 */
+			if (waiter == first)
+				waiter->handoff_set = true;
 		}
 
 		new = count;
@@ -1027,6 +1036,7 @@ queue:
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+	waiter.handoff_set = false;
 
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list)) {
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 6c373f2960e7..f82111837b8d 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -145,7 +145,7 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
 
 		/*
 		 * The power returned by active_state() is expected to be
-		 * positive and to fit into 16 bits.
+		 * positive and be in range.
 		 */
 		if (!power || power > EM_MAX_POWER) {
 			dev_err(dev, "EM: invalid power: %lu\n",
@@ -170,7 +170,7 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
 				goto free_ps_table;
 			}
 		} else {
-			power_res = em_scale_power(table[i].power);
+			power_res = table[i].power;
 			cost = div64_u64(fmax * power_res, table[i].frequency);
 		}
 
@@ -201,9 +201,17 @@ static int em_create_pd(struct device *dev, int nr_states,
 {
 	struct em_perf_domain *pd;
 	struct device *cpu_dev;
-	int cpu, ret;
+	int cpu, ret, num_cpus;
 
 	if (_is_cpu_device(dev)) {
+		num_cpus = cpumask_weight(cpus);
+
+		/* Prevent max possible energy calculation to not overflow */
+		if (num_cpus > EM_MAX_NUM_CPUS) {
+			dev_err(dev, "EM: too many CPUs, overflow possible\n");
+			return -EINVAL;
+		}
+
 		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
 		if (!pd)
 			return -ENOMEM;
@@ -314,13 +322,13 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
  * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
  *		obligatory. It can be taken from i.e. 'policy->cpus'. For other
  *		type of devices this should be set to NULL.
- * @milliwatts	: Flag indicating that the power values are in milliWatts or
+ * @microwatts	: Flag indicating that the power values are in micro-Watts or
  *		in some other scale. It must be set properly.
  *
  * Create Energy Model tables for a performance domain using the callbacks
  * defined in cb.
  *
- * The @milliwatts is important to set with correct value. Some kernel
+ * The @microwatts is important to set with correct value. Some kernel
  * sub-systems might rely on this flag and check if all devices in the EM are
  * using the same scale.
  *
@@ -331,7 +339,7 @@ EXPORT_SYMBOL_GPL(em_cpu_get);
  */
 int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
 				struct em_data_callback *cb, cpumask_t *cpus,
-				bool milliwatts)
+				bool microwatts)
 {
 	unsigned long cap, prev_cap = 0;
 	unsigned long flags = 0;
@@ -381,8 +389,8 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
 		}
 	}
 
-	if (milliwatts)
-		flags |= EM_PERF_DOMAIN_MILLIWATTS;
+	if (microwatts)
+		flags |= EM_PERF_DOMAIN_MICROWATTS;
 	else if (cb->get_cost)
 		flags |= EM_PERF_DOMAIN_ARTIFICIAL;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index ec7e1e85923e..af51ed6d45ef 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -531,7 +531,7 @@ int freq_qos_add_request(struct freq_constraints *qos,
 {
 	int ret;
 
-	if (IS_ERR_OR_NULL(qos) || !req)
+	if (IS_ERR_OR_NULL(qos) || !req || value < 0)
 		return -EINVAL;
 
 	if (WARN(freq_qos_request_active(req),
@@ -563,7 +563,7 @@ EXPORT_SYMBOL_GPL(freq_qos_add_request);
  */
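[Editor's note] On the energy_model.c hunks above: with em_scale_power() dropped, each performance state's cost is computed directly from its (now micro-Watt) power value, scaled to the domain's highest frequency. A small standalone sketch of that arithmetic with made-up numbers; em_state_cost() is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* Mirrors cost = div64_u64(fmax * power_res, table[i].frequency) above. */
static uint64_t em_state_cost(uint64_t fmax, uint64_t power, uint64_t freq)
{
	return (fmax * power) / freq;
}

int main(void)
{
	/* e.g. fmax = 2000000 kHz, 400000 uW consumed at 1000000 kHz */
	printf("cost = %llu\n",	/* -> 800000 */
	       (unsigned long long)em_state_cost(2000000, 400000, 1000000));
	return 0;
}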
