Instead of checking every time we hit the low-level context switch path to see if the new thread has a "partner" with which it needs to share time, just run the slice timer always and reset it from the scheduler at the points where it has already decided a switch needs to happen. In TICKLESS_KERNEL situations, we pay the cost of extra timer interrupts at ~10Hz or whatever, which is low (note also that this kind of regular wakeup architecture is required on SMP anyway so the scheduler can "notice" threads scheduled by other CPUs).

Advantages:

1. Much simpler logic. Significantly smaller code. No variance or dependence on tickless modes or timer driver (beyond setting a simple timeout).

2. No arch-specific assembly integration with _Swap() needed.

3. Better performance on many workloads, as the accounting now happens at most once per timer interrupt (~5 Hz) and at true rescheduling points, not on every unrelated context switch and interrupt return.

4. It's SMP-safe. The previous scheme kept the slice ticks as a global variable, which was an unnoticed bug.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
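The scheme the commit describes reads roughly like the C sketch below. Every name in it (reset_time_slice(), slice_expired(), request_reschedule(), SLICE_TICKS, timer_tick_isr()) is a hypothetical illustration, not Zephyr's actual internals: the point is only that expiry accounting lives in the periodic timer interrupt, and the scheduler re-arms the slice whenever it has already committed to a switch.

    /*
     * Minimal sketch of "reset the slice timer from the scheduler".
     * All names are hypothetical; this is not Zephyr's real code.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_CPUS    1
    #define SLICE_TICKS 10              /* slice length, in timer ticks */

    /* Per-CPU expiry, avoiding the "global slice ticks" SMP bug noted
     * in advantage #4 above. */
    static uint32_t slice_expiry[NUM_CPUS];
    static uint32_t now_ticks;          /* advanced by the timer interrupt */

    /* The scheduler calls this at every point where it has already
     * decided a switch will happen, so the incoming thread starts with
     * a fresh slice. */
    static void reset_time_slice(int cpu)
    {
        slice_expiry[cpu] = now_ticks + SLICE_TICKS;
    }

    /* Expiry accounting happens here, at most once per timer tick,
     * instead of in the arch-specific _Swap()/interrupt-exit path. */
    static bool slice_expired(int cpu)
    {
        return (int32_t)(now_ticks - slice_expiry[cpu]) >= 0;
    }

    /* Stand-in for the real reschedule request; in a kernel this would
     * flag the current thread for preemption, and the scheduler would
     * call reset_time_slice() itself when it commits to the switch. */
    static void request_reschedule(int cpu)
    {
        reset_time_slice(cpu);
    }

    void timer_tick_isr(int cpu)
    {
        now_ticks++;
        if (slice_expired(cpu)) {
            request_reschedule(cpu);
        }
    }

The signed-subtraction comparison in slice_expired() keeps the check correct across tick-counter wraparound.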
101 lines · 2.3 KiB · ArmAsm
/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM Cortex-M exception/interrupt exit API
 *
 * Provides functions for performing kernel handling when exiting exceptions or
 * interrupts that are installed directly in the vector table (i.e. that are not
 * wrapped around by _isr_wrapper()).
 */

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>

_ASM_FILE_PROLOGUE

GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_kernel)
/**
 *
 * @brief Kernel housekeeping when exiting interrupt handler installed
 * directly in vector table
 *
 * Kernel allows installing interrupt handlers (ISRs) directly into the vector
 * table to get the lowest interrupt latency possible. This allows the ISR to be
 * invoked directly without going through a software interrupt table. However,
 * upon exiting the ISR, some kernel work must still be performed, namely
 * possible context switching. While ISRs connected in the software interrupt
 * table do this automatically via a wrapper, ISRs connected directly in the
 * vector table must invoke _IntExit() as the *very last* action before
 * returning.
 *
 * e.g.
 *
 * void myISR(void)
 * {
 *     printk("in %s\n", __FUNCTION__);
 *     doStuff();
 *     _IntExit();
 * }
 *
 * @return N/A
 */

SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)

/* _IntExit falls through to _ExcExit (they are aliases of each other) */
/**
 *
 * @brief Kernel housekeeping when exiting exception handler installed
 * directly in vector table
 *
 * See _IntExit().
 *
 * @return N/A
 */

SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
#ifdef CONFIG_PREEMPT_ENABLED
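    /*
     * _kernel.ready_q.cache holds the best ready thread to run; if it
     * is already the _current thread, no context switch is needed.
     */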
    ldr r0, =_kernel

    ldr r1, [r0, #_kernel_offset_to_current]

    ldr r0, [r0, _kernel_offset_to_ready_q_cache]
    cmp r0, r1
    beq _EXIT_EXC
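    /*
     * PendSV is configured as the lowest-priority exception, so the
     * write below only *requests* the switch; it is taken once all
     * nested exceptions have returned.
     */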
    /* context switch required, pend the PendSV exception */
    ldr r1, =_SCS_ICSR
    ldr r2, =_SCS_ICSR_PENDSV
    str r2, [r1]

_ExcExitWithGdbStub:

_EXIT_EXC:
#endif /* CONFIG_PREEMPT_ENABLED */
#ifdef CONFIG_STACK_SENTINEL
    push {lr}
    bl _check_stack_sentinel
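    /*
     * ARMv6-M and ARMv8-M Baseline cannot pop directly into lr (the
     * Thumb POP register list only covers r0-r7 and pc), so restore
     * it through r0 instead.
     */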
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0}
    mov lr, r0
#else
    pop {lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_STACK_SENTINEL */
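    /*
     * lr still holds the EXC_RETURN value placed there by the hardware
     * on exception entry; branching to it performs the exception return.
     */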
    bx lr