zephyr/kernel/sys_clock.c
Ramesh Thomas 89ffd44dfb kernel: tickless: Add tickless kernel support
Adds event-based scheduling logic to the kernel. Updates
management of timeouts, timers, idling, etc. based on
time tracked at events rather than periodic ticks. Provides
interfaces for timers to announce and get the next timer expiry
based on kernel scheduling decisions involving time slicing
of threads, timeouts and idling. Uses wall time units instead
of ticks in all scheduling activities.

The implementation involves changes in the following areas:

1. Management of time in wall units like ms/us instead of ticks
The existing implementation already had an option to configure the
number of ticks per second. The new implementation builds on
top of that feature and provides an option to set the scheduling
granularity to milliseconds or microseconds. This allows most of
the current implementation to be reused. Due to this reuse and
coexistence with the tick-based kernel, the names of variables may
contain the word "tick". However, in the tickless kernel
implementation, it represents the currently configured time unit,
which would be milliseconds or microseconds. The APIs that take
time as a parameter are not impacted and continue to pass time in
milliseconds.
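
As a minimal illustration (the durations are arbitrary; the calls are
the existing kernel timing APIs), application code keeps passing
milliseconds whether the kernel is ticked or tickless:

    struct k_timer my_timer;          /* hypothetical timer for this example */

    k_timer_init(&my_timer, NULL, NULL);
    k_timer_start(&my_timer, 500, 0); /* first expiry in 500 ms, no period */
    k_sleep(100);                     /* sleep 100 ms regardless of granularity */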

2. Timers are no longer programmed in periodic mode to generate
ticks. Instead, they are programmed in one-shot mode to generate
events at the times the kernel scheduler needs to gain control for
its scheduling activities, such as timers, timeouts, time slicing
and idling.
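
A rough illustration of the one-shot idea, using the interfaces that
appear at the bottom of sys_clock.c (the fragment itself is not taken
from the tree):

    /* Ask for a single event at the next time the kernel needs control,
     * instead of a fixed periodic reload.
     */
    s32_t next = _get_next_timeout_expiry();

    if (next != K_FOREVER) {
        _set_time(next);  /* one-shot: fire once at the requested time */
    }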

3. The scheduler provides interfaces that the timer drivers use to
announce elapsed time and to get the next time the scheduler needs
a timer event. It is possible that the scheduler does not need
another timer event, in which case the system will wait for a
non-timer event to wake it up if it is idling.
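
As a sketch of the driver side (only _sys_idle_elapsed_ticks and
_nano_sys_clock_tick_announce() come from sys_clock.c below; the ISR
shape and the read_elapsed_units() helper are hypothetical), a tickless
timer ISR announces the elapsed wall time and lets the kernel decide
when the next event is needed:

    static void timer_isr(void *arg)
    {
        s32_t elapsed = read_elapsed_units();   /* hypothetical helper */

        _sys_idle_elapsed_ticks = elapsed;
        _nano_sys_clock_tick_announce(elapsed); /* kernel handles timeouts,
                                                 * time slicing and re-programming
                                                 */
    }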

4. New APIs are defined to be implemented by timer drivers. The
drivers also need to handle timer events differently. These changes
have been done in the HPET timer driver. In the future, other timer
drivers that support the tickless kernel should implement these APIs
as well. These APIs re-program the timer and update and announce
elapsed time.
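
The driver-provided calls referenced from sys_clock.c below are roughly
the following. The signatures are only inferred from how the functions
are used in this file, so treat this as a sketch rather than the
authoritative declarations:

    void _set_time(u32_t time);              /* program the next one-shot event */
    u32_t _get_remaining_program_time(void); /* time left on the current program */
    u64_t _get_elapsed_clock_time(void);     /* wall time elapsed since boot */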

5. The Philosopher and timer_api applications have been enabled to
test the tickless kernel. Separate configuration files are created
which define the necessary CONFIG flags. Run these apps using the
following command:

make pristine && make BOARD=qemu_x86 CONF_FILE=prj_tickless.conf qemu
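
For reference, such a configuration fragment would at minimum enable
the tickless options, roughly as follows; the exact contents live in
the applications' prj_tickless.conf files, so this is only a sketch:

    CONFIG_TICKLESS_IDLE=y
    CONFIG_TICKLESS_KERNEL=y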

Jira: ZEP-339 ZEP-1946 ZEP-948
Change-Id: I7d950c31bf1ff929a9066fad42c2f0559a2e5983
Signed-off-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-04-27 13:46:28 +00:00

/* system clock support */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel_structs.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif
#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;
volatile u64_t _sys_clock_tick_count;
#ifdef CONFIG_TICKLESS_KERNEL
/*
* If this flag is set, system clock will run continuously even if
* there are no timer events programmed. This allows using the
* system clock to track passage of time without interruption.
* To save power, this should be turned on only when required.
*/
int _sys_clock_always_on;
static u32_t next_ts;
#endif
/**
*
* @brief Return the lower part of the current system tick count
*
* @return the current system tick count
*
*/
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);
u32_t k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}
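/*
 * Usage sketch (illustrative, not part of the original file): in a tickless
 * build the uptime APIs require the continuously running system clock, as
 * the __ASSERT above notes, e.g.
 *
 *	k_enable_sys_clock_always_on();
 *	u32_t start = k_uptime_get_32();
 *	... do work ...
 *	u32_t elapsed_ms = k_uptime_get_32() - start;
 *
 * Keeping the clock always on costs power, so enable it only when needed.
 */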
/**
*
* @brief Return the current system tick count
*
* @return the current system tick count
*
*/
s64_t _tick_get(void)
{
	s64_t tmp_sys_clock_tick_count;
	/*
	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
	 * variable. Some architectures (x86) do not handle 64-bit atomically,
	 * so we have to lock the timer interrupt that causes change of
	 * _sys_clock_tick_count
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);
s64_t k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}
/**
*
* @brief Return number of ticks since a reference time
*
* This function is meant to be used in contained fragments of code. The first
* call to it in a particular code fragment fills in a reference time variable
* which then gets passed and updated every time the function is called. From
* the second call on, the delta between the value passed to it and the current
* tick count is the return value. Since the first call is meant to only fill in
* the reference time, its return value should be discarded.
*
* Since a code fragment that wants to use sys_tick_delta() passes in its
* own reference time variable, multiple code fragments can make use of this
* function concurrently.
*
* e.g.
* u64_t reftime;
* (void) sys_tick_delta(&reftime); /# prime it #/
* [do stuff]
* x = sys_tick_delta(&reftime); /# how long since priming #/
* [do more stuff]
* y = sys_tick_delta(&reftime); /# how long since [do stuff] #/
*
* @return tick count since reference time; undefined for first invocation
*
* NOTE: We use inline function for both 64-bit and 32-bit functions.
* Compiler optimizes out 64-bit result handling in 32-bit version.
*/
static ALWAYS_INLINE s64_t _nano_tick_delta(s64_t *reftime)
{
	s64_t delta;
	s64_t saved;

	/*
	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
	 * variable. Some architectures (x86) do not handle 64-bit atomically,
	 * so we have to lock the timer interrupt that causes change of
	 * _sys_clock_tick_count
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	saved = _get_elapsed_clock_time();
#else
	saved = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

	return delta;
}
/**
*
* @brief Return number of ticks since a reference time
*
* @return tick count since reference time; undefined for first invocation
*/
s64_t sys_tick_delta(s64_t *reftime)
{
	return _nano_tick_delta(reftime);
}

u32_t sys_tick_delta_32(s64_t *reftime)
{
	return (u32_t)_nano_tick_delta(reftime);
}
s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}
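/*
 * Usage sketch (illustrative, not part of the original file): like
 * sys_tick_delta() above, k_uptime_delta() takes a caller-owned reference
 * time that the first reading merely primes, e.g.
 *
 *	s64_t reftime = k_uptime_get();
 *	... do work ...
 *	s64_t elapsed_ms = k_uptime_delta(&reftime);
 *
 * In a tickless build this likewise requires the always-on system clock.
 */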
/* handle the expired timeouts in the nano timeout queue */
#ifdef CONFIG_SYS_CLOCK_EXISTS
#include <wait_q.h>
/*
* Handle timeouts by dequeuing the expired ones from _timeout_q and queue
* them on a local one, then doing the real handling from that queue. This
* allows going through the second queue without needing to have the
* interrupts locked since it is a local queue. Each expired timeout is marked
* as _EXPIRED so that an ISR preempting us and releasing an object on which
* a thread was timing out and expired will not give the object to that thread.
*
* Always called from interrupt level, and always only from the system clock
* interrupt.
*/
volatile int _handling_timeouts;
static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	struct _timeout *head =
		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

	K_DEBUG("head: %p, delta: %d\n",
		head, head ? head->delta_ticks_from_prev : -2112);

	if (!head) {
		irq_unlock(key);
		return;
	}

	head->delta_ticks_from_prev -= ticks;

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */
	sys_dnode_t *next = &head->node;
	struct _timeout *timeout = (struct _timeout *)next;

	_handling_timeouts = 1;

	while (timeout && timeout->delta_ticks_from_prev == 0) {
		sys_dlist_remove(next);

		/*
		 * Reverse the order that they were queued in the timeout_q:
		 * timeouts expiring on the same ticks are queued in the
		 * reverse order, time-wise, that they are added to shorten the
		 * amount of time with interrupts locked while walking the
		 * timeout_q. By reversing the order _again_ when building the
		 * expired queue, they end up being processed in the same order
		 * they were added, time-wise.
		 */
		sys_dlist_prepend(&expired, next);

		timeout->delta_ticks_from_prev = _EXPIRED;

		irq_unlock(key);
		key = irq_lock();

		next = sys_dlist_peek_head(&_timeout_q);
		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);

	_handling_timeouts = 0;
}
#else
#define handle_timeouts(ticks) do { } while ((0))
#endif
#ifdef CONFIG_TIMESLICING
s32_t _time_slice_elapsed;
s32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;
/*
* Always called from interrupt level, and always only from the system clock
* interrupt, thus:
* - _current does not have to be protected, since it only changes at thread
* level or when exiting a non-nested interrupt
* - _time_slice_elapsed does not have to be protected, since it can only change
* in this function and at thread level
* - _time_slice_duration does not have to be protected, since it can only
* change at thread level
*/
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
	if (!_is_thread_time_slicing(_current)) {
		return;
	}
#else
	if (_time_slice_duration == 0) {
		return;
	}

	if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
		return;
	}
#endif

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {
		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts =
		_ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif
/**
*
* @brief Announce a tick to the kernel
*
* This function is only to be called by the system clock timer driver when a
tick is to be announced to the kernel. It takes care of dequeuing the
timers that have expired and waking up the threads pending on them.
*
* @return N/A
*/
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled like just yet another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;
	next_to = !next_to || (next_ts
			       && next_to) > next_ts ? next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}