Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Kconfig                  |   5
-rw-r--r--  kernel/time/Makefile                 |   2
-rw-r--r--  kernel/time/alarmtimer.c             |  96
-rw-r--r--  kernel/time/clockevents.c            |  42
-rw-r--r--  kernel/time/clocksource.c            |  40
-rw-r--r--  kernel/time/hrtimer.c                | 230
-rw-r--r--  kernel/time/itimer.c                 |  22
-rw-r--r--  kernel/time/ntp.c                    | 840
-rw-r--r--  kernel/time/posix-cpu-timers.c       |  72
-rw-r--r--  kernel/time/posix-timers.c           | 267
-rw-r--r--  kernel/time/posix-timers.h           |   8
-rw-r--r--  kernel/time/sleep_timeout.c          | 377
-rw-r--r--  kernel/time/tick-internal.h          |   3
-rw-r--r--  kernel/time/tick-sched.c             |  25
-rw-r--r--  kernel/time/time.c                   |  20
-rw-r--r--  kernel/time/timekeeping.c            | 532
-rw-r--r--  kernel/time/timekeeping_internal.h   |  10
-rw-r--r--  kernel/time/timer.c                  | 195
-rw-r--r--  kernel/time/vsyscall.c               |   5
19 files changed, 1354 insertions(+), 1437 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8ebb6d5a106b..b0b97a60aaa6 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -17,11 +17,6 @@ config ARCH_CLOCKSOURCE_DATA
config ARCH_CLOCKSOURCE_INIT
bool
-# Clocksources require validation of the clocksource against the last
-# cycle update - x86/TSC misfeature
-config CLOCKSOURCE_VALIDATE_LAST_CYCLE
- bool
-
# Timekeeping vsyscall support
config GENERIC_TIME_VSYSCALL
bool
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 4af2a264a160..fe0ae82124fe 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += time.o timer.o hrtimer.o
+obj-y += time.o timer.o hrtimer.o sleep_timeout.o
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
obj-y += timeconv.o timecounter.o alarmtimer.o
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8bf888641694..0ddccdff119a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -197,28 +197,15 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
{
struct alarm *alarm = container_of(timer, struct alarm, timer);
struct alarm_base *base = &alarm_bases[alarm->type];
- unsigned long flags;
- int ret = HRTIMER_NORESTART;
- int restart = ALARMTIMER_NORESTART;
- spin_lock_irqsave(&base->lock, flags);
- alarmtimer_dequeue(base, alarm);
- spin_unlock_irqrestore(&base->lock, flags);
+ scoped_guard (spinlock_irqsave, &base->lock)
+ alarmtimer_dequeue(base, alarm);
if (alarm->function)
- restart = alarm->function(alarm, base->get_ktime());
-
- spin_lock_irqsave(&base->lock, flags);
- if (restart != ALARMTIMER_NORESTART) {
- hrtimer_set_expires(&alarm->timer, alarm->node.expires);
- alarmtimer_enqueue(base, alarm);
- ret = HRTIMER_RESTART;
- }
- spin_unlock_irqrestore(&base->lock, flags);
+ alarm->function(alarm, base->get_ktime());
trace_alarmtimer_fired(alarm, base->get_ktime());
- return ret;
-
+ return HRTIMER_NORESTART;
}
ktime_t alarm_expires_remaining(const struct alarm *alarm)
@@ -334,10 +321,9 @@ static int alarmtimer_resume(struct device *dev)
static void
__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
+ void (*function)(struct alarm *, ktime_t))
{
timerqueue_init(&alarm->node);
- alarm->timer.function = alarmtimer_fired;
alarm->function = function;
alarm->type = type;
alarm->state = ALARMTIMER_STATE_INACTIVE;
@@ -350,10 +336,10 @@ __alarm_init(struct alarm *alarm, enum alarmtimer_type type,
* @function: callback that is run when the alarm fires
*/
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
+ void (*function)(struct alarm *, ktime_t))
{
- hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
- HRTIMER_MODE_ABS);
+ hrtimer_setup(&alarm->timer, alarmtimer_fired, alarm_bases[type].base_clockid,
+ HRTIMER_MODE_ABS);
__alarm_init(alarm, type, function);
}
EXPORT_SYMBOL_GPL(alarm_init);
@@ -480,35 +466,11 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(alarm_forward);
-static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
{
struct alarm_base *base = &alarm_bases[alarm->type];
- ktime_t now = base->get_ktime();
-
- if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
- /*
- * Same issue as with posix_timer_fn(). Timers which are
- * periodic but the signal is ignored can starve the system
- * with a very small interval. The real fix which was
- * promised in the context of posix_timer_fn() never
- * materialized, but someone should really work on it.
- *
- * To prevent DOS fake @now to be 1 jiffy out which keeps
- * the overrun accounting correct but creates an
- * inconsistency vs. timer_gettime(2).
- */
- ktime_t kj = NSEC_PER_SEC / HZ;
- if (interval < kj)
- now = ktime_add(now, kj);
- }
-
- return alarm_forward(alarm, now, interval);
-}
-
-u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
-{
- return __alarm_forward_now(alarm, interval, false);
+ return alarm_forward(alarm, base->get_ktime(), interval);
}
EXPORT_SYMBOL_GPL(alarm_forward_now);
@@ -567,30 +529,12 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
*
* Return: whether the timer is to be restarted
*/
-static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
- ktime_t now)
+static void alarm_handle_timer(struct alarm *alarm, ktime_t now)
{
- struct k_itimer *ptr = container_of(alarm, struct k_itimer,
- it.alarm.alarmtimer);
- enum alarmtimer_restart result = ALARMTIMER_NORESTART;
- unsigned long flags;
+ struct k_itimer *ptr = container_of(alarm, struct k_itimer, it.alarm.alarmtimer);
- spin_lock_irqsave(&ptr->it_lock, flags);
-
- if (posix_timer_queue_signal(ptr) && ptr->it_interval) {
- /*
- * Handle ignored signals and rearm the timer. This will go
- * away once we handle ignored signals proper. Ensure that
- * small intervals cannot starve the system.
- */
- ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
- ++ptr->it_requeue_pending;
- ptr->it_active = 1;
- result = ALARMTIMER_RESTART;
- }
- spin_unlock_irqrestore(&ptr->it_lock, flags);
-
- return result;
+ guard(spinlock_irqsave)(&ptr->it_lock);
+ posix_timer_queue_signal(ptr);
}
/**
@@ -751,18 +695,14 @@ static int alarm_timer_create(struct k_itimer *new_timer)
* @now: time at the timer expiration
*
* Wakes up the task that set the alarmtimer
- *
- * Return: ALARMTIMER_NORESTART
*/
-static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
- ktime_t now)
+static void alarmtimer_nsleep_wakeup(struct alarm *alarm, ktime_t now)
{
struct task_struct *task = alarm->data;
alarm->data = NULL;
if (task)
wake_up_process(task);
- return ALARMTIMER_NORESTART;
}
/**
@@ -814,10 +754,10 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
static void
alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
+ void (*function)(struct alarm *, ktime_t))
{
- hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
- HRTIMER_MODE_ABS);
+ hrtimer_setup_on_stack(&alarm->timer, alarmtimer_fired, alarm_bases[type].base_clockid,
+ HRTIMER_MODE_ABS);
__alarm_init(alarm, type, function);
}
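
The net effect on alarm users: callbacks now return void and alarmtimer_fired() never re-arms the timer itself. A hypothetical one-shot user under the new API could look like this (sketch only, not part of this patch; all names below are made up):

#include <linux/alarmtimer.h>
#include <linux/completion.h>
#include <linux/ktime.h>

/* Invoked from alarmtimer_fired(); nothing to return anymore. */
static void demo_alarm_fn(struct alarm *alarm, ktime_t now)
{
	struct completion *done = alarm->data;

	complete(done);
}

static void demo_wait_half_second(void)
{
	struct alarm a;
	DECLARE_COMPLETION_ONSTACK(done);

	alarm_init(&a, ALARM_BOOTTIME, demo_alarm_fn);
	a.data = &done;
	alarm_start_relative(&a, ms_to_ktime(500));
	wait_for_completion(&done);
}

Periodic users that previously returned ALARMTIMER_RESTART now either re-arm explicitly (e.g. alarm_forward_now() followed by alarm_start()) or, like the posix alarm timers in this series, are re-armed from the signal delivery path.
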
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 78c7bd64d0dd..f3e831f62906 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -337,13 +337,21 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
}
/*
- * Called after a notify add to make devices available which were
- * released from the notifier call.
+ * Called after a clockevent has been added which might
+ * have replaced a current regular or broadcast device. A
+ * released normal device might be a suitable replacement
+ * for the current broadcast device. Similarly a released
+ * broadcast device might be a suitable replacement for a
+ * normal device.
*/
static void clockevents_notify_released(void)
{
struct clock_event_device *dev;
+ /*
+ * Keep iterating as long as tick_check_new_device()
+ * replaces a device.
+ */
while (!list_empty(&clockevents_released)) {
dev = list_entry(clockevents_released.next,
struct clock_event_device, list);
@@ -610,39 +618,30 @@ void clockevents_resume(void)
#ifdef CONFIG_HOTPLUG_CPU
-# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/**
- * tick_offline_cpu - Take CPU out of the broadcast mechanism
+ * tick_offline_cpu - Shutdown all clock events related
+ * to this CPU and take it out of the
+ * broadcast mechanism.
* @cpu: The outgoing CPU
*
- * Called on the outgoing CPU after it took itself offline.
+ * Called by the dying CPU during teardown.
*/
void tick_offline_cpu(unsigned int cpu)
{
- raw_spin_lock(&clockevents_lock);
- tick_broadcast_offline(cpu);
- raw_spin_unlock(&clockevents_lock);
-}
-# endif
-
-/**
- * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
- * @cpu: The dead CPU
- */
-void tick_cleanup_dead_cpu(int cpu)
-{
struct clock_event_device *dev, *tmp;
- unsigned long flags;
- raw_spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock(&clockevents_lock);
+ tick_broadcast_offline(cpu);
tick_shutdown(cpu);
+
/*
* Unregister the clock event devices which were
- * released from the users in the notify chain.
+ * released above.
*/
list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
list_del(&dev->list);
+
/*
* Now check whether the CPU has left unused per cpu devices
*/
@@ -654,7 +653,8 @@ void tick_cleanup_dead_cpu(int cpu)
list_del(&dev->list);
}
}
- raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+
+ raw_spin_unlock(&clockevents_lock);
}
#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 23336eecb4f4..aab6472853fa 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -20,6 +20,8 @@
#include "tick-internal.h"
#include "timekeeping_internal.h"
+static void clocksource_enqueue(struct clocksource *cs);
+
static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
{
u64 delta = clocksource_delta(end, start, cs->mask);
@@ -171,7 +173,6 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags)
}
static int clocksource_watchdog_kthread(void *data);
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
static void clocksource_watchdog_work(struct work_struct *work)
{
@@ -191,6 +192,13 @@ static void clocksource_watchdog_work(struct work_struct *work)
kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
+static void clocksource_change_rating(struct clocksource *cs, int rating)
+{
+ list_del(&cs->list);
+ cs->rating = rating;
+ clocksource_enqueue(cs);
+}
+
static void __clocksource_unstable(struct clocksource *cs)
{
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
@@ -697,7 +705,7 @@ static int __clocksource_watchdog_kthread(void)
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
list_del_init(&cs->wd_list);
- __clocksource_change_rating(cs, 0);
+ clocksource_change_rating(cs, 0);
select = 1;
}
if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -1255,34 +1263,6 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
-static void __clocksource_change_rating(struct clocksource *cs, int rating)
-{
- list_del(&cs->list);
- cs->rating = rating;
- clocksource_enqueue(cs);
-}
-
-/**
- * clocksource_change_rating - Change the rating of a registered clocksource
- * @cs: clocksource to be changed
- * @rating: new rating
- */
-void clocksource_change_rating(struct clocksource *cs, int rating)
-{
- unsigned long flags;
-
- mutex_lock(&clocksource_mutex);
- clocksource_watchdog_lock(&flags);
- __clocksource_change_rating(cs, rating);
- clocksource_watchdog_unlock(&flags);
-
- clocksource_select();
- clocksource_select_watchdog(false);
- clocksource_suspend_select(false);
- mutex_unlock(&clocksource_mutex);
-}
-EXPORT_SYMBOL(clocksource_change_rating);
-
/*
* Unbind clocksource @cs. Called with clocksource_mutex held
*/
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d9911516e743..80fe3749d2db 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -417,6 +417,11 @@ static inline void debug_hrtimer_init(struct hrtimer *timer)
debug_object_init(timer, &hrtimer_debug_descr);
}
+static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer)
+{
+ debug_object_init_on_stack(timer, &hrtimer_debug_descr);
+}
+
static inline void debug_hrtimer_activate(struct hrtimer *timer,
enum hrtimer_mode mode)
{
@@ -428,28 +433,6 @@ static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
debug_object_deactivate(timer, &hrtimer_debug_descr);
}
-static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
- enum hrtimer_mode mode);
-
-void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
- enum hrtimer_mode mode)
-{
- debug_object_init_on_stack(timer, &hrtimer_debug_descr);
- __hrtimer_init(timer, clock_id, mode);
-}
-EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
-
-static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
- clockid_t clock_id, enum hrtimer_mode mode);
-
-void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id, enum hrtimer_mode mode)
-{
- debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
- __hrtimer_init_sleeper(sl, clock_id, mode);
-}
-EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
-
void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
@@ -459,6 +442,7 @@ EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
@@ -472,6 +456,13 @@ debug_init(struct hrtimer *timer, clockid_t clockid,
trace_hrtimer_init(timer, clockid, mode);
}
+static inline void debug_init_on_stack(struct hrtimer *timer, clockid_t clockid,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init_on_stack(timer);
+ trace_hrtimer_init(timer, clockid, mode);
+}
+
static inline void debug_activate(struct hrtimer *timer,
enum hrtimer_mode mode)
{
@@ -1544,6 +1535,11 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
return HRTIMER_BASE_MONOTONIC;
}
+static enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
+{
+ return HRTIMER_NORESTART;
+}
+
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
@@ -1580,6 +1576,18 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
timerqueue_init(&timer->node);
}
+static void __hrtimer_setup(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode)
+{
+ __hrtimer_init(timer, clock_id, mode);
+
+ if (WARN_ON_ONCE(!function))
+ timer->function = hrtimer_dummy_timeout;
+ else
+ timer->function = function;
+}
+
/**
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
@@ -1600,6 +1608,46 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
}
EXPORT_SYMBOL_GPL(hrtimer_init);
+/**
+ * hrtimer_setup - initialize a timer to the given clock
+ * @timer: the timer to be initialized
+ * @function: the callback function
+ * @clock_id: the clock to be used
+ * @mode: The modes which are relevant for initialization:
+ * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
+ * HRTIMER_MODE_REL_SOFT
+ *
+ * The PINNED variants of the above can be handed in,
+ * but the PINNED bit is ignored as pinning happens
+ * when the hrtimer is started
+ */
+void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode)
+{
+ debug_init(timer, clock_id, mode);
+ __hrtimer_setup(timer, function, clock_id, mode);
+}
+EXPORT_SYMBOL_GPL(hrtimer_setup);
+
+/**
+ * hrtimer_setup_on_stack - initialize a timer on stack memory
+ * @timer: The timer to be initialized
+ * @function: the callback function
+ * @clock_id: The clock to be used
+ * @mode: The timer mode
+ *
+ * Similar to hrtimer_setup(), except that this one must be used if struct hrtimer is in stack
+ * memory.
+ */
+void hrtimer_setup_on_stack(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode)
+{
+ debug_init_on_stack(timer, clock_id, mode);
+ __hrtimer_setup(timer, function, clock_id, mode);
+}
+EXPORT_SYMBOL_GPL(hrtimer_setup_on_stack);
+
/*
* A timer is active, when it is enqueued into the rbtree or the
* callback function is running or it's in the state of being migrated
@@ -1944,7 +1992,7 @@ void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
* Make the enqueue delivery mode check work on RT. If the sleeper
* was initialized for hard interrupt delivery, force the mode bit.
* This is a special case for hrtimer_sleepers because
- * hrtimer_init_sleeper() determines the delivery mode on RT so the
+ * __hrtimer_init_sleeper() determines the delivery mode on RT so the
* fiddling with this decision is avoided at the call sites.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
@@ -1987,19 +2035,18 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
}
/**
- * hrtimer_init_sleeper - initialize sleeper to the given clock
+ * hrtimer_setup_sleeper_on_stack - initialize a sleeper in stack memory
* @sl: sleeper to be initialized
* @clock_id: the clock to be used
* @mode: timer mode abs/rel
*/
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
- enum hrtimer_mode mode)
+void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl,
+ clockid_t clock_id, enum hrtimer_mode mode)
{
- debug_init(&sl->timer, clock_id, mode);
+ debug_init_on_stack(&sl->timer, clock_id, mode);
__hrtimer_init_sleeper(sl, clock_id, mode);
-
}
-EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+EXPORT_SYMBOL_GPL(hrtimer_setup_sleeper_on_stack);
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
@@ -2060,8 +2107,7 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
struct hrtimer_sleeper t;
int ret;
- hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
- HRTIMER_MODE_ABS);
+ hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
@@ -2075,7 +2121,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
struct hrtimer_sleeper t;
int ret = 0;
- hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+ hrtimer_setup_sleeper_on_stack(&t, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
@@ -2242,123 +2288,3 @@ void __init hrtimers_init(void)
hrtimers_prepare_cpu(smp_processor_id());
open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
-
-/**
- * schedule_hrtimeout_range_clock - sleep until timeout
- * @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode
- * @clock_id: timer clock to be used
- */
-int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode, clockid_t clock_id)
-{
- struct hrtimer_sleeper t;
-
- /*
- * Optimize when a zero timeout value is given. It does not
- * matter whether this is an absolute or a relative time.
- */
- if (expires && *expires == 0) {
- __set_current_state(TASK_RUNNING);
- return 0;
- }
-
- /*
- * A NULL parameter means "infinite"
- */
- if (!expires) {
- schedule();
- return -EINTR;
- }
-
- hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
- hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
- hrtimer_sleeper_start_expires(&t, mode);
-
- if (likely(t.task))
- schedule();
-
- hrtimer_cancel(&t.timer);
- destroy_hrtimer_on_stack(&t.timer);
-
- __set_current_state(TASK_RUNNING);
-
- return !t.task ? 0 : -EINTR;
-}
-EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
-
-/**
- * schedule_hrtimeout_range - sleep until timeout
- * @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * The @delta argument gives the kernel the freedom to schedule the
- * actual wakeup to a time that is both power and performance friendly
- * for regular (non RT/DL) tasks.
- * The kernel give the normal best effort behavior for "@expires+@delta",
- * but may decide to fire the timer earlier, but no earlier than @expires.
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns unless the current task is explicitly
- * woken up, (e.g. by wake_up_process()).
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task or the current task is explicitly woken
- * up.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired. If the task was woken before the
- * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
- * by an explicit wakeup, it returns -EINTR.
- */
-int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode)
-{
- return schedule_hrtimeout_range_clock(expires, delta, mode,
- CLOCK_MONOTONIC);
-}
-EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
-
-/**
- * schedule_hrtimeout - sleep until timeout
- * @expires: timeout value (ktime_t)
- * @mode: timer mode
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns unless the current task is explicitly
- * woken up, (e.g. by wake_up_process()).
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task or the current task is explicitly woken
- * up.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired. If the task was woken before the
- * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
- * by an explicit wakeup, it returns -EINTR.
- */
-int __sched schedule_hrtimeout(ktime_t *expires,
- const enum hrtimer_mode mode)
-{
- return schedule_hrtimeout_range(expires, 0, mode);
-}
-EXPORT_SYMBOL_GPL(schedule_hrtimeout);
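
The new hrtimer_setup() collapses the old two-step pattern (hrtimer_init() followed by an open-coded assignment of timer->function) into one call that also rejects a NULL callback. A hypothetical conversion (sketch only; struct and function names are made up):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

struct demo {
	struct hrtimer timer;
};

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	struct demo *d = container_of(t, struct demo, timer);

	pr_debug("demo %p expired\n", d);
	return HRTIMER_NORESTART;
}

static void demo_arm(struct demo *d)
{
	/* Old: hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *      d->timer.function = demo_timer_fn;
	 * New: the callback is handed over at setup time.
	 */
	hrtimer_setup(&d->timer, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&d->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}

The schedule_hrtimeout*() family deleted above is not removed from the kernel; it moves to the new kernel/time/sleep_timeout.c added by this series (see the Makefile change and the diffstat).
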
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 00629e658ca1..876d389b2e21 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -151,7 +151,27 @@ COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
#endif
/*
- * The timer is automagically restarted, when interval != 0
+ * Invoked from dequeue_signal() when SIGALRM is delivered.
+ *
+ * Restart the ITIMER_REAL timer if it is armed as periodic timer. Doing
+ * this in the signal delivery path instead of self rearming prevents a DoS
+ * with small increments in the high resolution timer case and reduces timer
+ * noise in general.
+ */
+void posixtimer_rearm_itimer(struct task_struct *tsk)
+{
+ struct hrtimer *tmr = &tsk->signal->real_timer;
+
+ if (!hrtimer_is_queued(tmr) && tsk->signal->it_real_incr != 0) {
+ hrtimer_forward(tmr, tmr->base->get_time(),
+ tsk->signal->it_real_incr);
+ hrtimer_restart(tmr);
+ }
+}
+
+/*
+ * Interval timers are restarted in the signal delivery path. See
+ * posixtimer_rearm_itimer().
*/
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
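
From userspace the ITIMER_REAL semantics are unchanged; only the point at which the kernel re-arms the periodic timer moves into signal delivery. For reference, a minimal periodic setitimer() consumer (ordinary userspace C, not part of the patch):

#include <signal.h>
#include <sys/time.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	/* async-signal-safe enough for a demo */
	write(STDOUT_FILENO, "tick\n", 5);
}

int main(void)
{
	struct itimerval it = {
		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },	/* first expiry */
		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },	/* re-armed by the kernel */
	};

	signal(SIGALRM, on_alarm);
	setitimer(ITIMER_REAL, &it, NULL);
	for (;;)
		pause();
}
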
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 802b336f4b8c..b550ebe0f03b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -22,22 +22,79 @@
#include "ntp_internal.h"
#include "timekeeping_internal.h"
-
-/*
- * NTP timekeeping variables:
+/**
+ * struct ntp_data - Structure holding all NTP related state
+ * @tick_usec: USER_HZ period in microseconds
+ * @tick_length: Adjusted tick length
+ * @tick_length_base: Base value for @tick_length
+ * @time_state: State of the clock synchronization
+ * @time_status: Clock status bits
+ * @time_offset: Time adjustment in nanoseconds
+ * @time_constant: PLL time constant
+ * @time_maxerror: Maximum error in microseconds holding the NTP sync distance
+ * (NTP dispersion + delay / 2)
+ * @time_esterror: Estimated error in microseconds holding NTP dispersion
+ * @time_freq: Frequency offset scaled nsecs/secs
+ * @time_reftime: Time at last adjustment in seconds
+ * @time_adjust: Adjustment value
+ * @ntp_tick_adj: Constant boot-param configurable NTP tick adjustment (upscaled)
+ * @ntp_next_leap_sec: Second value of the next pending leapsecond, or TIME64_MAX if no leap
*
- * Note: All of the NTP state is protected by the timekeeping locks.
+ * @pps_valid: PPS signal watchdog counter
+ * @pps_tf: PPS phase median filter
+ * @pps_jitter: PPS current jitter in nanoseconds
+ * @pps_fbase: PPS beginning of the last freq interval
+ * @pps_shift: PPS current interval duration in seconds (shift value)
+ * @pps_intcnt: PPS interval counter
+ * @pps_freq: PPS frequency offset in scaled ns/s
+ * @pps_stabil: PPS current stability in scaled ns/s
+ * @pps_calcnt: PPS monitor: calibration intervals
+ * @pps_jitcnt: PPS monitor: jitter limit exceeded
+ * @pps_stbcnt: PPS monitor: stability limit exceeded
+ * @pps_errcnt: PPS monitor: calibration errors
+ *
+ * Protected by the timekeeping locks.
*/
+struct ntp_data {
+ unsigned long tick_usec;
+ u64 tick_length;
+ u64 tick_length_base;
+ int time_state;
+ int time_status;
+ s64 time_offset;
+ long time_constant;
+ long time_maxerror;
+ long time_esterror;
+ s64 time_freq;
+ time64_t time_reftime;
+ long time_adjust;
+ s64 ntp_tick_adj;
+ time64_t ntp_next_leap_sec;
+#ifdef CONFIG_NTP_PPS
+ int pps_valid;
+ long pps_tf[3];
+ long pps_jitter;
+ struct timespec64 pps_fbase;
+ int pps_shift;
+ int pps_intcnt;
+ s64 pps_freq;
+ long pps_stabil;
+ long pps_calcnt;
+ long pps_jitcnt;
+ long pps_stbcnt;
+ long pps_errcnt;
+#endif
+};
-
-/* USER_HZ period (usecs): */
-unsigned long tick_usec = USER_TICK_USEC;
-
-/* SHIFTED_HZ period (nsecs): */
-unsigned long tick_nsec;
-
-static u64 tick_length;
-static u64 tick_length_base;
+static struct ntp_data tk_ntp_data = {
+ .tick_usec = USER_TICK_USEC,
+ .time_state = TIME_OK,
+ .time_status = STA_UNSYNC,
+ .time_constant = 2,
+ .time_maxerror = NTP_PHASE_LIMIT,
+ .time_esterror = NTP_PHASE_LIMIT,
+ .ntp_next_leap_sec = TIME64_MAX,
+};
#define SECS_PER_DAY 86400
#define MAX_TICKADJ 500LL /* usecs */
@@ -45,46 +102,6 @@ static u64 tick_length_base;
(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
#define MAX_TAI_OFFSET 100000
-/*
- * phase-lock loop variables
- */
-
-/*
- * clock synchronization status
- *
- * (TIME_ERROR prevents overwriting the CMOS clock)
- */
-static int time_state = TIME_OK;
-
-/* clock status bits: */
-static int time_status = STA_UNSYNC;
-
-/* time adjustment (nsecs): */
-static s64 time_offset;
-
-/* pll time constant: */
-static long time_constant = 2;
-
-/* maximum error (usecs): */
-static long time_maxerror = NTP_PHASE_LIMIT;
-
-/* estimated error (usecs): */
-static long time_esterror = NTP_PHASE_LIMIT;
-
-/* frequency offset (scaled nsecs/secs): */
-static s64 time_freq;
-
-/* time at last adjustment (secs): */
-static time64_t time_reftime;
-
-static long time_adjust;
-
-/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
-static s64 ntp_tick_adj;
-
-/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
-static time64_t ntp_next_leap_sec = TIME64_MAX;
-
#ifdef CONFIG_NTP_PPS
/*
@@ -101,128 +118,115 @@ static time64_t ntp_next_leap_sec = TIME64_MAX;
intervals to decrease it */
#define PPS_MAXWANDER 100000 /* max PPS freq wander (ns/s) */
-static int pps_valid; /* signal watchdog counter */
-static long pps_tf[3]; /* phase median filter */
-static long pps_jitter; /* current jitter (ns) */
-static struct timespec64 pps_fbase; /* beginning of the last freq interval */
-static int pps_shift; /* current interval duration (s) (shift) */
-static int pps_intcnt; /* interval counter */
-static s64 pps_freq; /* frequency offset (scaled ns/s) */
-static long pps_stabil; /* current stability (scaled ns/s) */
-
/*
- * PPS signal quality monitors
- */
-static long pps_calcnt; /* calibration intervals */
-static long pps_jitcnt; /* jitter limit exceeded */
-static long pps_stbcnt; /* stability limit exceeded */
-static long pps_errcnt; /* calibration errors */
-
-
-/* PPS kernel consumer compensates the whole phase error immediately.
+ * PPS kernel consumer compensates the whole phase error immediately.
* Otherwise, reduce the offset by a fixed factor times the time constant.
*/
-static inline s64 ntp_offset_chunk(s64 offset)
+static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
- if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
+ if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL)
return offset;
else
- return shift_right(offset, SHIFT_PLL + time_constant);
+ return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}
-static inline void pps_reset_freq_interval(void)
+static inline void pps_reset_freq_interval(struct ntp_data *ntpdata)
{
- /* the PPS calibration interval may end
- surprisingly early */
- pps_shift = PPS_INTMIN;
- pps_intcnt = 0;
+ /* The PPS calibration interval may end surprisingly early */
+ ntpdata->pps_shift = PPS_INTMIN;
+ ntpdata->pps_intcnt = 0;
}
/**
* pps_clear - Clears the PPS state variables