// SPDX-License-Identifier: GPL-2.0+
/*
* 2002-10-15 Posix Clocks & timers
* by George Anzinger george@mvista.com
* Copyright (C) 2002 2003 by MontaVista Software.
*
* 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
* Copyright (C) 2004 Boris Hu
*
* These are all the functions necessary to implement POSIX clocks & timers
*/
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/prctl.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/time_namespace.h>
#include <linux/uaccess.h>
#include "timekeeping.h"
#include "posix-timers.h"
/*
* Timers are managed in a hash table for lockless lookup. The hash key is
* constructed from current::signal and the timer ID and the timer is
* matched against current::signal and the timer ID when walking the hash
* bucket list.
*
* This allows checkpoint/restore to reconstruct the exact timer IDs for
* a process.
*/
/*
 * One hash bucket: @lock serializes add/remove, @head is the RCU-traversed
 * collision list walked by posix_timer_by_id() and posix_timer_hashed().
 */
struct timer_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	head;
};

/*
 * Global timer hash state, set up once at boot (hence __ro_after_init).
 * Aligned so the hot members share as few cache lines as possible.
 */
static struct {
	struct timer_hash_bucket	*buckets;	/* hash table, length mask + 1 */
	unsigned long			mask;		/* bucket index mask (size - 1) */
	struct kmem_cache		*cache;		/* slab cache for struct k_itimer */
} __timer_data __ro_after_init __aligned(4*sizeof(long));

/* Convenience accessors for the members above */
#define timer_buckets		(__timer_data.buckets)
#define timer_hashmask		(__timer_data.mask)
#define posix_timers_cache	(__timer_data.cache)
/* Clock table and lookup helper; definitions appear later in this file */
static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;

/* Sentinel "requested ID": let posix_timer_add() allocate the next free ID */
#define TIMER_ANY_ID		INT_MIN

/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
static struct k_itimer *__lock_timer(timer_t timer_id);

/*
 * Look up a timer by ID and return it with it_lock held, or NULL if it
 * does not exist. The __cond_lock() annotation tells sparse that the
 * lock is acquired exactly when the result is non-NULL.
 */
#define lock_timer(tid)							\
({	struct k_itimer *__timr;					\
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid));	\
	__timr;								\
})
/* Drop the it_lock taken by lock_timer()/__lock_timer(); NULL is a no-op. */
static inline void unlock_timer(struct k_itimer *timr)
{
	if (likely(timr))
		spin_unlock_irq(&timr->it_lock);
}
/*
 * Scope-based guard: lock the timer with the given ID, or return -EINVAL
 * from the enclosing function when it cannot be found.
 */
#define scoped_timer_get_or_fail(_id) \
	scoped_cond_guard(lock_timer, return -EINVAL, _id)

/* Name under which the guarded timer is accessible inside the scope */
#define scoped_timer (scope)

/* Cleanup class backing the guards above: acquire via __lock_timer(),
 * release via unlock_timer(); conditional because lookup can fail. */
DEFINE_CLASS(lock_timer, struct k_itimer *, unlock_timer(_T), __lock_timer(id), timer_t id);
DEFINE_CLASS_IS_COND_GUARD(lock_timer);
/*
 * Map a (signal_struct pointer, timer id) pair to its hash bucket. The
 * pointer value itself is hashed word-wise with the timer ID as seed.
 */
static struct timer_hash_bucket *hash_bucket(struct signal_struct *sig, unsigned int nr)
{
	unsigned int idx;

	idx = jhash2((u32 *)&sig, sizeof(sig) / sizeof(u32), nr) & timer_hashmask;
	return &timer_buckets[idx];
}
/*
 * Lockless (RCU) lookup of a timer by ID for the current process.
 * Half-initialized timers never match: their it_signal has bit 0 set
 * (see posix_timer_add_at()), so the owner comparison below fails.
 */
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct timer_hash_bucket *bucket = hash_bucket(sig, id);
	struct k_itimer *tmr;

	hlist_for_each_entry_rcu(tmr, &bucket->head, t_hash) {
		if (tmr->it_id != id)
			continue;
		/* it_signal can be set concurrently, hence READ_ONCE() */
		if (READ_ONCE(tmr->it_signal) == sig)
			return tmr;
	}
	return NULL;
}
/*
 * Return the signal_struct owning @timer. Bit 0 of it_signal marks a
 * timer that is not yet fully initialized (it keeps posix_timer_by_id()
 * from matching it); strip that marker to recover the real pointer.
 */
static inline struct signal_struct *posix_sig_owner(const struct k_itimer *timer)
{
	return (struct signal_struct *)((unsigned long)timer->it_signal & ~1UL);
}
/*
 * Check whether a timer with @id owned by @sig is already present in
 * @bucket. Matches half-initialized timers too, as posix_sig_owner()
 * masks out the "in setup" marker bit. Caller holds bucket->lock, which
 * the lockdep_is_held() cookie conveys to the RCU list walker.
 */
static bool posix_timer_hashed(struct timer_hash_bucket *bucket, struct signal_struct *sig,
			       timer_t id)
{
	struct k_itimer *pos;

	hlist_for_each_entry_rcu(pos, &bucket->head, t_hash, lockdep_is_held(&bucket->lock)) {
		if (pos->it_id == id && posix_sig_owner(pos) == sig)
			return true;
	}
	return false;
}
/*
 * Try to insert @timer with the requested @id into the hash. Returns
 * true on success, false when that (owner, id) pair is already taken.
 * On success the timer is published in the hash with bit 0 of
 * it_signal set, i.e. visible for duplicate detection but not yet
 * matchable by syscall lookup.
 */
static bool posix_timer_add_at(struct k_itimer *timer, struct signal_struct *sig, unsigned int id)
{
	struct timer_hash_bucket *bucket = hash_bucket(sig, id);

	scoped_guard (spinlock, &bucket->lock) {
		/*
		 * Validate under the lock as this could have raced against
		 * another thread ending up with the same ID, which is
		 * highly unlikely, but possible.
		 */
		if (!posix_timer_hashed(bucket, sig, id)) {
			/*
			 * Set the timer ID and the signal pointer to make
			 * it identifiable in the hash table. The signal
			 * pointer has bit 0 set to indicate that it is not
			 * yet fully initialized. posix_timer_hashed()
			 * masks this bit out, but the syscall lookup fails
			 * to match due to it being set. This guarantees
			 * that there can't be duplicate timer IDs handed
			 * out.
			 */
			timer->it_id = (timer_t)id;
			timer->it_signal = (struct signal_struct *)((unsigned long)sig | 1UL);
			hlist_add_head_rcu(&timer->t_hash, &bucket->head);
			return true;
		}
	}
	return false;
}
/*
 * Allocate a timer ID for @timer and insert it into the hash table.
 *
 * @req_id is either TIMER_ANY_ID (normal timer_create(): pick the next
 * free ID) or an exact ID requested by checkpoint/restore. Returns the
 * allocated non-negative ID, -EBUSY when an exact requested ID is taken,
 * or -EAGAIN when the ID space is exhausted.
 */
static int posix_timer_add(struct k_itimer *timer, int req_id)
{
	struct signal_struct *sig = current->signal;

	if (unlikely(req_id != TIMER_ANY_ID)) {
		if (!posix_timer_add_at(timer, sig, req_id))
			return -EBUSY;
		/*
		 * Move the ID counter past the requested ID, so that after
		 * switching back to normal mode the IDs are outside of the
		 * exact allocated region. That avoids ID collisions on the
		 * next regular timer_create() invocations.
		 */
		atomic_set(&sig->next_posix_timer_id, req_id + 1);
		return req_id;
	}

	/* Scan at most the full positive ID space before giving up */
	for (unsigned int cnt = 0; cnt <= INT_MAX; cnt++) {
		/* Get the next timer ID and clamp it to positive space */
		unsigned int id = atomic_fetch_inc(&sig->next_posix_timer_id) & INT_MAX;

		if (posix_timer_add_at(timer, sig, id))
			return id;
		cond_resched();
	}
	/* POSIX return code when no timer ID could be allocated */
	return -EAGAIN;
}
/* CLOCK_REALTIME: read the wall clock into @tp. Cannot fail. */
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}
/* CLOCK_REALTIME: read the wall clock as a ktime_t. */
static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
	return ktime_get_real();
}
static int posix_clock_realtime_set(const clockid_t which_clock,
const struct timespec64 *tp)
{
return
|