Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                         |  23
-rw-r--r--  kernel/events/core.c                 | 224
-rw-r--r--  kernel/events/ring_buffer.c          |   2
-rw-r--r--  kernel/exit.c                        |   6
-rw-r--r--  kernel/fork.c                        |   2
-rw-r--r--  kernel/futex.c                       |  13
-rw-r--r--  kernel/irq/Kconfig                   |   4
-rw-r--r--  kernel/irq/Makefile                  |   1
-rw-r--r--  kernel/irq/chip.c                    |  28
-rw-r--r--  kernel/irq/cpuhotplug.c              |  82
-rw-r--r--  kernel/irq/handle.c                  |   7
-rw-r--r--  kernel/irq/internals.h               |   4
-rw-r--r--  kernel/irq/irqdomain.c               | 177
-rw-r--r--  kernel/irq/manage.c                  | 221
-rw-r--r--  kernel/irq/msi.c                     |   8
-rw-r--r--  kernel/irq/proc.c                    |   2
-rw-r--r--  kernel/irq/settings.h                |  12
-rw-r--r--  kernel/kexec_core.c                  |   6
-rw-r--r--  kernel/locking/locktorture.c         | 164
-rw-r--r--  kernel/locking/mcs_spinlock.h        |   4
-rw-r--r--  kernel/locking/mutex.c               |   9
-rw-r--r--  kernel/locking/osq_lock.c            |  11
-rw-r--r--  kernel/locking/percpu-rwsem.c        |  90
-rw-r--r--  kernel/locking/qrwlock.c             |   8
-rw-r--r--  kernel/locking/qspinlock_paravirt.h  |   6
-rw-r--r--  kernel/locking/rtmutex.c             |  33
-rw-r--r--  kernel/locking/rwsem-xadd.c          |   5
-rw-r--r--  kernel/memremap.c                    |  14
-rw-r--r--  kernel/rcu/Makefile                  |   2
-rw-r--r--  kernel/rcu/rcutorture.c              |  16
-rw-r--r--  kernel/rcu/srcu.c                    |   4
-rw-r--r--  kernel/rcu/sync.c                    | 223
-rw-r--r--  kernel/rcu/tiny.c                    |   8
-rw-r--r--  kernel/rcu/tree.c                    | 512
-rw-r--r--  kernel/rcu/tree.h                    |  69
-rw-r--r--  kernel/rcu/tree_plugin.h             | 437
-rw-r--r--  kernel/rcu/tree_trace.c              |  10
-rw-r--r--  kernel/rcu/update.c                  |   2
-rw-r--r--  kernel/sched/core.c                  | 204
-rw-r--r--  kernel/sched/cpudeadline.c           |   5
-rw-r--r--  kernel/sched/cpudeadline.h           |   1
-rw-r--r--  kernel/sched/fair.c                  | 419
-rw-r--r--  kernel/sched/features.h              |  21
-rw-r--r--  kernel/sched/rt.c                    |  22
-rw-r--r--  kernel/sched/sched.h                 |  55
-rw-r--r--  kernel/smpboot.c                     |   5
-rw-r--r--  kernel/stop_machine.c                |  90
-rw-r--r--  kernel/time/clocksource.c            |   7
-rw-r--r--  kernel/time/hrtimer.c                |   2
-rw-r--r--  kernel/time/ntp.c                    |  16
-rw-r--r--  kernel/time/ntp_internal.h           |   2
-rw-r--r--  kernel/time/posix-cpu-timers.c       |  63
-rw-r--r--  kernel/time/timeconst.bc             |   2
-rw-r--r--  kernel/time/timekeeping.c            |  18
-rw-r--r--  kernel/time/timer.c                  |  13
-rw-r--r--  kernel/torture.c                     |   1
-rw-r--r--  kernel/trace/ftrace.c                |   2
-rw-r--r--  kernel/trace/trace_sched_switch.c    |   3
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    |   2
59 files changed, 2304 insertions(+), 1098 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 82cf9dff4295..85ff5e26e23b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,19 +102,6 @@ void get_online_cpus(void)
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-bool try_get_online_cpus(void)
-{
- if (cpu_hotplug.active_writer == current)
- return true;
- if (!mutex_trylock(&cpu_hotplug.lock))
- return false;
- cpuhp_lock_acquire_tryread();
- atomic_inc(&cpu_hotplug.refcount);
- mutex_unlock(&cpu_hotplug.lock);
- return true;
-}
-EXPORT_SYMBOL_GPL(try_get_online_cpus);
-
void put_online_cpus(void)
{
int refcount;
@@ -304,8 +291,8 @@ static inline void check_for_tasks(int dead_cpu)
{
struct task_struct *g, *p;
- read_lock_irq(&tasklist_lock);
- do_each_thread(g, p) {
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, p) {
if (!p->on_rq)
continue;
/*
@@ -320,8 +307,8 @@ static inline void check_for_tasks(int dead_cpu)
pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
- } while_each_thread(g, p);
- read_unlock_irq(&tasklist_lock);
+ }
+ read_unlock(&tasklist_lock);
}
struct take_cpu_down_param {
@@ -344,7 +331,7 @@ static int take_cpu_down(void *_param)
/* Give up timekeeping duties */
tick_handover_do_timer();
/* Park the stopper thread */
- kthread_park(current);
+ stop_machine_park((long)param->hcpu);
return 0;
}
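
The check_for_tasks() change above replaces the old do_each_thread()/while_each_thread() pair with for_each_process_thread() and stops disabling interrupts around tasklist_lock. A minimal sketch of that iteration pattern, assuming plain process context and the usual <linux/sched.h> definitions (this function is illustrative, not part of the patch):

static void count_runnable_threads(void)
{
	struct task_struct *g, *p;
	int on_rq = 0;

	/* A read-side hold of tasklist_lock is enough to walk the thread
	 * list; interrupts stay enabled, unlike the old read_lock_irq()
	 * variant used by check_for_tasks(). */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {		/* every thread of every process */
		if (p->on_rq)
			on_rq++;
	}
	read_unlock(&tasklist_lock);

	pr_info("%d threads currently on a runqueue\n", on_rq);
}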
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 64754bfecd70..39db20c6248e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
static int perf_sample_allowed_ns __read_mostly =
DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
-void update_perf_cpu_limits(void)
+static void update_perf_cpu_limits(void)
{
u64 tmp = perf_sample_period_ns;
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
* mode SWOUT : schedule out everything
* mode SWIN : schedule in based on cgroup for next
*/
-void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task, int mode)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
@@ -1939,7 +1939,7 @@ group_sched_in(struct perf_event *group_event,
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- pmu->start_txn(pmu);
+ pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
@@ -3209,14 +3209,22 @@ void perf_event_exec(void)
rcu_read_unlock();
}
+struct perf_read_data {
+ struct perf_event *event;
+ bool group;
+ int ret;
+};
+
/*
* Cross CPU call to read the hardware event
*/
static void __perf_event_read(void *info)
{
- struct perf_event *event = info;
+ struct perf_read_data *data = info;
+ struct perf_event *sub, *event = data->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ struct pmu *pmu = event->pmu;
/*
* If this is a task context, we need to check whether it is
@@ -3233,9 +3241,35 @@ static void __perf_event_read(void *info)
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
+
update_event_times(event);
- if (event->state == PERF_EVENT_STATE_ACTIVE)
- event->pmu->read(event);
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
+ goto unlock;
+
+ if (!data->group) {
+ pmu->read(event);
+ data->ret = 0;
+ goto unlock;
+ }
+
+ pmu->start_txn(pmu, PERF_PMU_TXN_READ);
+
+ pmu->read(event);
+
+ list_for_each_entry(sub, &event->sibling_list, group_entry) {
+ update_event_times(sub);
+ if (sub->state == PERF_EVENT_STATE_ACTIVE) {
+ /*
+ * Use sibling's PMU rather than @event's since
+ * sibling could be on different (eg: software) PMU.
+ */
+ sub->pmu->read(sub);
+ }
+ }
+
+ data->ret = pmu->commit_txn(pmu);
+
+unlock:
raw_spin_unlock(&ctx->lock);
}
@@ -3300,15 +3334,23 @@ u64 perf_event_read_local(struct perf_event *event)
return val;
}
-static u64 perf_event_read(struct perf_event *event)
+static int perf_event_read(struct perf_event *event, bool group)
{
+ int ret = 0;
+
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
+ struct perf_read_data data = {
+ .event = event,
+ .group = group,
+ .ret = 0,
+ };
smp_call_function_single(event->oncpu,
- __perf_event_read, event, 1);
+ __perf_event_read, &data, 1);
+ ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
@@ -3323,11 +3365,14 @@ static u64 perf_event_read(struct perf_event *event)
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
- update_event_times(event);
+ if (group)
+ update_group_times(event);
+ else
+ update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
- return perf_event_count(event);
+ return ret;
}
/*
@@ -3769,7 +3814,7 @@ static void put_event(struct perf_event *event)
* see the comment there.
*
* 2) there is a lock-inversion with mmap_sem through
- * perf_event_read_group(), which takes faults while
+ * perf_read_group(), which takes faults while
* holding ctx->mutex, however this is called after
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
@@ -3843,14 +3888,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
*running = 0;
mutex_lock(&event->child_mutex);
- total += perf_event_read(event);
+
+ (void)perf_event_read(event, false);
+ total += perf_event_count(event);
+
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
- total += perf_event_read(child);
+ (void)perf_event_read(child, false);
+ total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
@@ -3860,55 +3909,95 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
-static int perf_event_read_group(struct perf_event *event,
- u64 read_format, char __user *buf)
+static int __perf_read_group_add(struct perf_event *leader,
+ u64 read_format, u64 *values)
{
- struct perf_event *leader = event->group_leader, *sub;
- struct perf_event_context *ctx = leader->ctx;
- int n = 0, size = 0, ret;
- u64 count, enabled, running;
- u64 values[5];
+ struct perf_event *sub;
+ int n = 1; /* skip @nr */
+ int ret;
- lockdep_assert_held(&ctx->mutex);
+ ret = perf_event_read(leader, true);
+ if (ret)
+ return ret;
- count = perf_event_read_value(leader, &enabled, &running);
+ /*
+ * Since we co-schedule groups, {enabled,running} times of siblings
+ * will be identical to those of the leader, so we only publish one
+ * set.
+ */
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] += leader->total_time_enabled +
+ atomic64_read(&leader->child_total_time_enabled);
+ }
- values[n++] = 1 + leader->nr_siblings;
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- values[n++] = enabled;
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- values[n++] = running;
- values[n++] = count;
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] += leader->total_time_running +
+ atomic64_read(&leader->child_total_time_running);
+ }
+
+ /*
+ * Write {count,id} tuples for every sibling.
+ */
+ values[n++] += perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
- size = n * sizeof(u64);
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+ values[n++] += perf_event_count(sub);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(sub);
+ }
- if (copy_to_user(buf, values, size))
- return -EFAULT;
+ return 0;
+}
+
+static int perf_read_group(struct perf_event *event,
+ u64 read_format, char __user *buf)
+{
+ struct perf_event *leader = event->group_leader, *child;
+ struct perf_event_context *ctx = leader->ctx;
+ int ret;
+ u64 *values;
- ret = size;
+ lockdep_assert_held(&ctx->mutex);
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- n = 0;
+ values = kzalloc(event->read_size, GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
- values[n++] = perf_event_read_value(sub, &enabled, &running);
- if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_event_id(sub);
+ values[0] = 1 + leader->nr_siblings;
- size = n * sizeof(u64);
+ /*
+ * By locking the child_mutex of the leader we effectively
+ * lock the child list of all siblings.. XXX explain how.
+ */
+ mutex_lock(&leader->child_mutex);
- if (copy_to_user(buf + ret, values, size)) {
- return -EFAULT;
- }
+ ret = __perf_read_group_add(leader, read_format, values);
+ if (ret)
+ goto unlock;
- ret += size;
+ list_for_each_entry(child, &leader->child_list, child_list) {
+ ret = __perf_read_group_add(child, read_format, values);
+ if (ret)
+ goto unlock;
}
+ mutex_unlock(&leader->child_mutex);
+
+ ret = event->read_size;
+ if (copy_to_user(buf, values, event->read_size))
+ ret = -EFAULT;
+ goto out;
+
+unlock:
+ mutex_unlock(&leader->child_mutex);
+out:
+ kfree(values);
return ret;
}
-static int perf_event_read_one(struct perf_event *event,
+static int perf_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
@@ -3946,7 +4035,7 @@ static bool is_event_hup(struct perf_event *event)
* Read the performance event - simple non blocking version for now
*/
static ssize_t
-perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf, size_t count)
{
u64 read_format = event->attr.read_format;
int ret;
@@ -3964,9 +4053,9 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
- ret = perf_event_read_group(event, read_format, buf);
+ ret = perf_read_group(event, read_format, buf);
else
- ret = perf_event_read_one(event, read_format, buf);
+ ret = perf_read_one(event, read_format, buf);
return ret;
}
@@ -3979,7 +4068,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
int ret;
ctx = perf_event_ctx_lock(event);
- ret = perf_read_hw(event, buf, count);
+ ret = __perf_read(event, buf, count);
perf_event_ctx_unlock(event, ctx);
return ret;
@@ -4010,7 +4099,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
static void _perf_event_reset(struct perf_event *event)
{
- (void)perf_event_read(event);
+ (void)perf_event_read(event, false);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
@@ -7297,24 +7386,49 @@ static void perf_pmu_nop_void(struct pmu *pmu)
{
}
+static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
+{
+}
+
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
-static void perf_pmu_start_txn(struct pmu *pmu)
+static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+
+static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
{
+ __this_cpu_write(nop_txn_flags, flags);
+
+ if (flags & ~PERF_PMU_TXN_ADD)
+ return;
+
perf_pmu_disable(pmu);
}
static int perf_pmu_commit_txn(struct pmu *pmu)
{
+ unsigned int flags = __this_cpu_read(nop_txn_flags);
+
+ __this_cpu_write(nop_txn_flags, 0);
+
+ if (flags & ~PERF_PMU_TXN_ADD)
+ return 0;
+
perf_pmu_enable(pmu);
return 0;
}
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
+ unsigned int flags = __this_cpu_read(nop_txn_flags);
+
+ __this_cpu_write(nop_txn_flags, 0);
+
+ if (flags & ~PERF_PMU_TXN_ADD)
+ return;
+
perf_pmu_enable(pmu);
}
@@ -7553,7 +7667,7 @@ got_cpu_context:
pmu->commit_txn = perf_pmu_commit_txn;
pmu->cancel_txn = perf_pmu_cancel_txn;
} else {
- pmu->start_txn = perf_pmu_nop_void;
+ pmu->start_txn = perf_pmu_nop_txn;
pmu->commit_txn = perf_pmu_nop_int;
pmu->cancel_txn = perf_pmu_nop_void;
}
@@ -7641,7 +7755,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
return ret;
}
-struct pmu *perf_init_event(struct perf_event *event)
+static struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
int idx;
@@ -9350,14 +9464,6 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task)
{
- /*
- * cgroup_exit() is called in the copy_process() failure path.
- * Ignore this case since the task hasn't ran yet, this avoids
- * trying to poke a half freed task state from generic code.
- */
- if (!(task->flags & PF_EXITING))
- return;
-
task_function_call(task, __perf_cgroup_move, task);
}
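
The perf_read_group() rework above fills one event->read_size buffer: values[0] holds the number of events, the leader's enabled/running times follow, then one {count,id} tuple per group member, with counts summed across all child events. That buffer is what a read(2) on the group leader's file descriptor returns. A userspace sketch of the consumer side, assuming the events were opened with PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID; the struct mirrors the read_format layout documented in perf_event_open(2) and is not taken from this patch:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct group_read {
	uint64_t nr;			/* 1 + nr_siblings (values[0] above) */
	uint64_t time_enabled;		/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;		/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct { uint64_t value, id; } cnt[];	/* one tuple per event, leader first */
};

static void dump_group(int group_fd, void *buf, size_t bufsz)
{
	struct group_read *gr = buf;
	uint64_t i;

	/* With PERF_FORMAT_GROUP set, this read() ends up in perf_read_group(). */
	if (read(group_fd, buf, bufsz) < 0)
		return;

	for (i = 0; i < gr->nr; i++)
		printf("event id %llu: count %llu\n",
		       (unsigned long long)gr->cnt[i].id,
		       (unsigned long long)gr->cnt[i].value);
}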
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 182bc30899d5..b5d1ea79c595 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -141,7 +141,7 @@ int perf_output_begin(struct perf_output_handle *handle,
perf_output_get_handle(handle);
do {
- tail = READ_ONCE_CTRL(rb->user_page->data_tail);
+ tail = READ_ONCE(rb->user_page->data_tail);
offset = head = local_read(&rb->head);
if (!rb->overwrite &&
unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
diff --git a/kernel/exit.c b/kernel/exit.c
index ea95ee1b5ef7..07110c6020a0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -706,10 +706,12 @@ void do_exit(long code)
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
- if (unlikely(in_atomic()))
+ if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
preempt_count());
+ preempt_count_set(PREEMPT_ENABLED);
+ }
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
@@ -761,7 +763,9 @@ void do_exit(long code)
*/
flush_ptrace_hw_breakpoint(tsk);
+ TASKS_RCU(preempt_disable());
TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
+ TASKS_RCU(preempt_enable());
exit_notify(tsk, group_dead);
proc_exit_connector(tsk);
#ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index 2845623fb582..6ac894244d39 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
- sig->cputimer.running = 1;
+ sig->cputimer.running = true;
}
/* The timer lists. */
diff --git a/kernel/futex.c b/kernel/futex.c
index 6e443efc65f4..dfc86e93c31d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -255,9 +255,18 @@ struct futex_hash_bucket {
struct plist_head chain;
} ____cacheline_aligned_in_smp;
-static unsigned long __read_mostly futex_hashsize;
+/*
+ * The base of the bucket array and its size are always used together
+ * (after initialization only in hash_futex()), so ensure that they
+ * reside in the same cacheline.
+ */
+static struct {
+ struct futex_hash_bucket *queues;
+ unsigned long hashsize;
+} __futex_data __read_mostly __aligned(2*sizeof(long));
+#define futex_queues (__futex_data.queues)
+#define futex_hashsize (__futex_data.hashsize)
-static struct futex_hash_bucket *futex_queues;
/*
* Fault injections for futexes.
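
The futex change above packs the bucket array base and the hash size, which hash_futex() always uses together, into one small struct so both live in the same cache line; the #define aliases keep every existing reference to futex_queues and futex_hashsize compiling unchanged. A sketch of the same pattern with hypothetical names (not from this patch): the struct is 2*sizeof(long) bytes, and aligning an object to its own power-of-two size guarantees it can never straddle a larger, power-of-two-sized cache line.

static struct {
	struct example_bucket	*queues;	/* hypothetical table base */
	unsigned long		hashsize;	/* number of buckets */
} __example_data __read_mostly __aligned(2*sizeof(long));
#define example_queues   (__example_data.queues)
#define example_hashsize (__example_data.hashsize)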
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 9a76e3beda54..3b48dab80164 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
config GENERIC_PENDING_IRQ
bool
+# Support for generic irq migrating off cpu before the cpu is offline.
+config GENERIC_IRQ_MIGRATION
+ bool
+
# Alpha specific irq affinity mechanism
config AUTO_IRQ_AFFINITY
bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index d12123526e2b..2fc9cbdf35b6 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
obj-$(CONFIG_PM_SLEEP) += pm.o
obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e28169dd1c36..15206453b12a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -21,6 +21,20 @@
#include "internals.h"
+static irqreturn_t bad_chained_irq(int irq, void *dev_id)
+{
+ WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
+ return IRQ_NONE;
+}
+
+/*
+ * Chained handlers should never call action on their IRQ. This default
+ * action will emit warning if such thing happens.
+ */
+struct irqaction chained_action = {
+ .handler = bad_chained_irq,
+};
+
/**
* irq_set_chip - set the irq chip for an irq
* @irq: irq number
@@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
* disabled. If an interrupt happens, then the interrupt flow
* handler masks the line at the hardware level and marks it
* pending.
+ *
+ * If the interrupt chip does not implement the irq_disable callback,
+