zephyr/kernel/include/ksched.h
Benjamin Walsh f955476559 kernel/arch: optimize memory use of some thread fields
Some thread fields were 32 bits wide, even though they do not come close
to using that full range of values. They are changed to 8-bit fields instead.

- prio can fit in one byte, limiting the priority range to -128 to 127

- recursive scheduler locking can be limited to a depth of 255; a rollover
  most probably results from a logic error

- flags are split into execution flags and thread states; 8 bits is
  currently enough for each of them, with at worst two states and four
  flags to spare on x86 (other architectures have six flags to spare)

Doing this saves 8 bytes per stack. It also sets up an upcoming
enhancement to the check of whether the current thread is preemptible
on interrupt exit.

Change-Id: Ieb5321a5b99f99173b0605dd4a193c3bc7ddabf4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
2017-01-09 20:52:24 +00:00
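
For illustration, a minimal sketch of the resulting field widths. The
prio, sched_locked and thread_state names match the accessors used later
in this header; the execution_flags name and the surrounding layout are
assumptions, not the actual struct definition:

	struct _thread_base {
		/* ... */
		uint8_t execution_flags; /* split out of the old 32-bit flags */
		uint8_t thread_state;    /* K_PENDING, K_PRESTART, K_DEAD, ... */
		int8_t prio;             /* -128..127; negative = cooperative */
		uint8_t sched_locked;    /* recursive lock depth, 0 = unlocked */
		/* ... */
	};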


/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ksched__h_
#define _ksched__h_
#include <kernel_structs.h>
extern k_tid_t const _main_thread;
extern k_tid_t const _idle_thread;
extern void _add_thread_to_ready_q(struct k_thread *thread);
extern void _remove_thread_from_ready_q(struct k_thread *thread);
extern void _reschedule_threads(int key);
extern void k_sched_unlock(void);
extern void _pend_thread(struct k_thread *thread,
_wait_q_t *wait_q, int32_t timeout);
extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout);
extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
extern int __must_switch_threads(void);
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
extern int32_t _ms_to_ticks(int32_t ms);
#endif
extern void idle(void *, void *, void *);
/* find which one is the next thread to run */
/* must be called with interrupts locked */
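/* the next-thread cache is kept up to date by the ready queue add/remove routines */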
static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
{
return _ready_q.cache;
}
static inline int _is_idle_thread(void *entry_point)
{
return entry_point == idle;
}
#ifdef CONFIG_MULTITHREADING
#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
__ASSERT(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
(_is_prio_higher_or_equal((prio), \
K_LOWEST_APPLICATION_THREAD_PRIO) && \
_is_prio_lower_or_equal((prio), \
K_HIGHEST_APPLICATION_THREAD_PRIO)), \
"invalid priority (%d); allowed range: %d to %d", \
(prio), \
K_LOWEST_APPLICATION_THREAD_PRIO, \
K_HIGHEST_APPLICATION_THREAD_PRIO); \
} while ((0))
#else
#define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif
/*
* The _is_prio_higher family: I created this because higher priorities are
* lower numerically and I always found it somewhat confusing to see, e.g.:
*
* if (t1.prio < t2.prio) /# is t1's priority higher than t2's priority ? #/
*
* in code. The fact that this kind of code usually carries that exact
* comment warrants a function where the comparison is embedded in the name.
*
* That said, feel free to remove them and do the comparison directly if
* this feels like overkill.
*/
static inline int _is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
return prio1 <= prio2;
}
static inline int _is_prio_higher_or_equal(int prio1, int prio2)
{
return _is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}
static inline int _is_prio1_higher_than_prio2(int prio1, int prio2)
{
return prio1 < prio2;
}
static inline int _is_prio_higher(int prio, int test_prio)
{
return _is_prio1_higher_than_prio2(prio, test_prio);
}
static inline int _is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
return prio1 >= prio2;
}
static inline int _is_prio_lower_or_equal(int prio1, int prio2)
{
return _is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}
static inline int _is_prio1_lower_than_prio2(int prio1, int prio2)
{
return prio1 > prio2;
}
static inline int _is_prio_lower(int prio1, int prio2)
{
return _is_prio1_lower_than_prio2(prio1, prio2);
}
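/*
 * Worked example (illustrative only): since priorities are numerically
 * inverted, _is_prio_higher(-1, 0) returns 1 (cooperative priority -1
 * outranks preemptible priority 0), and _is_prio_lower(5, 0) also
 * returns 1.
 */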
static inline int _is_t1_higher_prio_than_t2(struct k_thread *t1,
struct k_thread *t2)
{
return _is_prio1_higher_than_prio2(t1->base.prio, t2->base.prio);
}
static inline int _is_higher_prio_than_current(struct k_thread *thread)
{
return _is_t1_higher_prio_than_t2(thread, _current);
}
/* is thread currently cooperative ? */
static inline int _is_coop(struct k_thread *thread)
{
#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_COOP_ENABLED)
return thread->base.prio < 0;
#elif defined(CONFIG_COOP_ENABLED)
return 1;
#elif defined(CONFIG_PREEMPT_ENABLED)
return 0;
#else
#error "Impossible configuration"
#endif
}
/* is thread currently preemptible ? */
static inline int _is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
return !_is_coop(thread) && !thread->base.sched_locked;
#else
return 0;
#endif
}
/* is the current thread preemptible and are we not running in ISR context ? */
static inline int _is_current_execution_context_preemptible(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
return !_is_in_isr() && _is_preempt(_current);
#else
return 0;
#endif
}
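/*
 * Usage sketch (hypothetical, not taken from any arch code): on interrupt
 * exit, architecture code could combine the checks above to decide whether
 * the interrupted thread may be context-switched out:
 *
 *	if (_is_preempt(_current) && __must_switch_threads()) {
 *		... perform arch-specific context switch ...
 *	}
 *
 * _must_switch_threads() below bundles the same pair for thread context.
 */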
/* find out if priority is under priority inheritance ceiling */
static inline int _is_under_prio_ceiling(int prio)
{
return prio >= CONFIG_PRIORITY_CEILING;
}
/*
* Find out what priority to set a thread to taking the prio ceiling into
* consideration.
*/
static inline int _get_new_prio_with_ceiling(int prio)
{
return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}
/* find out the prio bitmap index for a given prio */
static inline int _get_ready_q_prio_bmap_index(int prio)
{
return (prio + CONFIG_NUM_COOP_PRIORITIES) >> 5;
}
/* find out the prio bit for a given prio */
static inline int _get_ready_q_prio_bit(int prio)
{
return (1 << ((prio + CONFIG_NUM_COOP_PRIORITIES) & 0x1f));
}
/* find out the ready queue array index for a given prio */
static inline int _get_ready_q_q_index(int prio)
{
return prio + CONFIG_NUM_COOP_PRIORITIES;
}
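/*
 * Worked example (illustrative, assuming CONFIG_NUM_COOP_PRIORITIES == 16
 * and enough preemptible priorities configured): prio -16 maps to bitmap
 * index 0, bit (1 << 0) and queue index 0; prio 17 maps to absolute prio
 * 33, i.e. bitmap index 1, bit (1 << 1) and queue index 33.
 */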
/* find out the currently highest priority where a thread is ready to run */
/* interrupts must be locked */
static inline int _get_highest_ready_prio(void)
{
int bitmap = 0;
uint32_t ready_range;
#if (K_NUM_PRIORITIES <= 32)
ready_range = _ready_q.prio_bmap[0];
#else
for (;; bitmap++) {
__ASSERT(bitmap < K_NUM_PRIO_BITMAPS, "prio out-of-range\n");
if (_ready_q.prio_bmap[bitmap]) {
ready_range = _ready_q.prio_bmap[bitmap];
break;
}
}
#endif
int abs_prio = (find_lsb_set(ready_range) - 1) + (bitmap << 5);
__ASSERT(abs_prio < K_NUM_PRIORITIES, "prio out-of-range\n");
return abs_prio - CONFIG_NUM_COOP_PRIORITIES;
}
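/*
 * Continuing the example above: if prio_bmap[1] is the first non-zero word
 * and its lowest set bit is bit 1, find_lsb_set() returns 2, so
 * abs_prio = (2 - 1) + (1 << 5) = 33 and the function returns
 * 33 - 16 = 17.
 */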
/*
* Checks if current thread must be context-switched out. The caller must
* already know that the execution context is a thread.
*/
static inline int _must_switch_threads(void)
{
return _is_preempt(_current) && __must_switch_threads();
}
/*
* Internal equivalent to k_sched_lock so that it does not incur a function
* call penalty in the kernel guts.
*
* Must be kept in sync until the header files are cleaned up and
* applications have access to the kernel internal data structures (through
* APIs of course).
*/
static inline void _sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 1, "");
--_current->base.sched_locked;
K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->base.sched_locked);
#endif
}
/**
* @brief Unlock the scheduler but do NOT reschedule
*
* It is incumbent upon the caller to ensure that the reschedule occurs
* sometime after the scheduler is unlocked.
*/
static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 0, "");
++_current->base.sched_locked;
#endif
}
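/*
 * Usage sketch (hypothetical caller): pair _sched_lock() with
 * k_sched_unlock() in the common case, or with
 * _sched_unlock_no_reschedule() when the caller triggers the reschedule
 * itself, e.g.:
 *
 *	int key = irq_lock();
 *
 *	_sched_lock();
 *	... update scheduler-protected state ...
 *	_sched_unlock_no_reschedule();
 *	_reschedule_threads(key); /# performs the deferred reschedule #/
 *
 * Note that sched_locked counts down from zero: any non-zero value means
 * locked, which is why _is_preempt() simply tests !sched_locked.
 */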
static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
{
thread->base.thread_state |= states;
}
static inline void _reset_thread_states(struct k_thread *thread,
uint32_t states)
{
thread->base.thread_state &= ~states;
}
/* mark a thread as being suspended */
static inline void _mark_thread_as_suspended(struct k_thread *thread)
{
thread->base.thread_state |= K_SUSPENDED;
}
/* mark a thread as not being suspended */
static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
{
thread->base.thread_state &= ~K_SUSPENDED;
}
static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
return thread->base.timeout.delta_ticks_from_prev == _EXPIRED;
#else
return 0;
#endif
}
/* check if a thread is on the timeout queue */
static inline int _is_thread_timeout_active(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
return thread->base.timeout.delta_ticks_from_prev != _INACTIVE;
#else
return 0;
#endif
}
static inline int _has_thread_started(struct k_thread *thread)
{
return !(thread->base.thread_state & K_PRESTART);
}
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
{
return thread->base.thread_state & (K_PENDING | K_PRESTART |
K_DEAD | K_DUMMY |
K_SUSPENDED);
}
/* check if a thread is ready */
static inline int _is_thread_ready(struct k_thread *thread)
{
return !(_is_thread_prevented_from_running(thread) ||
_is_thread_timeout_active(thread));
}
/* mark a thread as pending in its TCS */
static inline void _mark_thread_as_pending(struct k_thread *thread)
{
thread->base.thread_state |= K_PENDING;
}
/* mark a thread as not pending in its TCS */
static inline void _mark_thread_as_not_pending(struct k_thread *thread)
{
thread->base.thread_state &= ~K_PENDING;
}
/* check if a thread is pending */
static inline int _is_thread_pending(struct k_thread *thread)
{
return !!(thread->base.thread_state & K_PENDING);
}
/**
* @brief Mark a thread as started
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_started(struct k_thread *thread)
{
thread->base.thread_state &= ~K_PRESTART;
}
/*
* Put the thread in the ready queue according to its priority if it is not
* blocked for another reason (e.g. suspended).
*
* Must be called with interrupts locked.
*/
static inline void _ready_thread(struct k_thread *thread)
{
__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
(thread == _idle_thread)),
"thread %p prio too low (is %d, cannot be lower than %d)",
thread, thread->base.prio,
thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
K_LOWEST_APPLICATION_THREAD_PRIO);
__ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
"thread %p prio too high (id %d, cannot be higher than %d)",
thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
/* needed to handle the start-with-delay case */
_mark_thread_as_started(thread);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
}
}
/**
* @brief Mark thread as dead
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_dead(struct k_thread *thread)
{
thread->base.thread_state |= K_DEAD;
}
/*
* Set a thread's priority. If the thread is ready, place it in the correct
* queue.
*/
/* must be called with interrupts locked */
static inline void _thread_priority_set(struct k_thread *thread, int prio)
{
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
thread->base.prio = prio;
_add_thread_to_ready_q(thread);
} else {
thread->base.prio = prio;
}
}
/* peek at the first thread pending on a particular wait queue */
static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q)
{
return (struct k_thread *)sys_dlist_peek_head(wait_q);
}
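/*
 * Note: while timeouts are being handled (_handling_timeouts is set),
 * threads whose timeout has already expired are still sitting on the wait
 * queue until the timeout handler removes them; _get_thread_to_unpend()
 * skips them so they are not unpended twice.
 */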
static inline struct k_thread *_get_thread_to_unpend(_wait_q_t *wait_q)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
extern volatile int _handling_timeouts;
if (_handling_timeouts) {
sys_dlist_t *q = (sys_dlist_t *)wait_q;
sys_dnode_t *cur, *next;
/* skip threads that have an expired timeout */
SYS_DLIST_FOR_EACH_NODE_SAFE(q, cur, next) {
struct k_thread *thread = (struct k_thread *)cur;
if (_is_thread_timeout_expired(thread)) {
continue;
}
sys_dlist_remove(cur);
return thread;
}
return NULL;
}
#endif
return (struct k_thread *)sys_dlist_get(wait_q);
}
/* unpend the first thread from a wait queue */
/* must be called with interrupts locked */
static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
{
struct k_thread *thread = _get_thread_to_unpend(wait_q);
if (thread) {
_mark_thread_as_not_pending(thread);
}
return thread;
}
/* Unpend a thread from the wait queue it is on. Thread must be pending. */
/* must be called with interrupts locked */
static inline void _unpend_thread(struct k_thread *thread)
{
__ASSERT(thread->base.thread_state & K_PENDING, "");
sys_dlist_remove(&thread->base.k_q_node);
_mark_thread_as_not_pending(thread);
}
#endif /* _ksched__h_ */