mirror of
https://github.com/zephyrproject-rtos/zephyr
synced 2025-09-08 23:53:00 +00:00
Update reserved function names starting with one underscore, replacing
them as follows:

  '_k_'       with 'z_'
  '_K_'       with 'Z_'
  '_handler_' with 'z_handl_'
  '_Cstart'   with 'z_cstart'
  '_Swap'     with 'z_swap'

This renaming is done on both global and static function names in
kernel/include and include/. Other static function names in kernel/ are
renamed by removing the leading underscore. Other function names not
starting with any prefix listed above are renamed starting with a 'z_'
or 'Z_' prefix. Function names starting with two or three leading
underscores are not automatically renamed, since these names would
collide with the variants with two or three leading underscores.

Various generator scripts have also been updated, as well as perf,
linker and usb files. These are:

  drivers/serial/uart_handlers.c
  include/linker/kobject-text.ld
  kernel/include/syscall_handler.h
  scripts/gen_kobject_list.py
  scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
117 lines
2.3 KiB
C
/*
 * Copyright (c) 2018 Intel corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

#ifdef CONFIG_SMP
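/* Single global flag: 0 means free, 1 means held.  Taken with the
 * atomic_cas() loops below; together with masking local interrupts,
 * this is what gives irq_lock() its system-wide meaning under
 * CONFIG_SMP.
 */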
static atomic_t global_lock;

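/* Mask local interrupts and take the global lock.  The per-thread
 * global_lock_count makes the lock recursive: only the outermost
 * acquisition spins on the flag.  Returns the interrupt-state key to
 * pass back to z_smp_global_unlock().
 */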
unsigned int z_smp_global_lock(void)
{
	unsigned int key = z_arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}

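/* Drop one level of the recursive global lock, clearing the shared
 * flag only at the outermost level, then restore the interrupt state
 * captured by the matching z_smp_global_lock() call.
 */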
void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	z_arch_irq_unlock(key);
}

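/* Re-take the global lock on behalf of a thread that was holding it
 * when it was switched out (nonzero global_lock_count), masking
 * interrupts and spinning until the flag is free again.
 */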
void z_smp_reacquire_global_lock(struct k_thread *thread)
{
	if (thread->base.global_lock_count) {
		z_arch_irq_lock();

		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}

#endif

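/* Interrupt stacks for the secondary CPUs, one per additional CPU;
 * they are defined elsewhere in the kernel and handed to
 * z_arch_start_cpu() below.
 */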
extern k_thread_stack_t _interrupt_stack1[];
extern k_thread_stack_t _interrupt_stack2[];
extern k_thread_stack_t _interrupt_stack3[];

#ifdef CONFIG_SMP
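/* Entry point for each secondary CPU: busy-wait until smp_init()
 * releases the start flag, then swap out of a dummy thread into the
 * scheduler.  The key argument is unused here.
 */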
static void smp_init_top(int key, void *arg)
{
	atomic_t *start_flag = arg;

	/* Wait for the signal to begin scheduling */
	do {
		k_busy_wait(100);
	} while (!atomic_get(start_flag));

	/* Switch out of a dummy thread.  Trick cribbed from the main
	 * thread init.  Should probably unify implementations.
	 */
	struct k_thread dummy_thread = {
		.base.user_options = K_ESSENTIAL,
		.base.thread_state = _THREAD_DUMMY,
	};

	z_arch_curr_cpu()->current = &dummy_thread;
	smp_timer_init();
	z_swap_unlocked();

	CODE_UNREACHABLE;
}
#endif

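/* Kick off the secondary CPUs, if configured, then release them all
 * at once by setting the start flag they are spinning on in
 * smp_init_top().
 */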
void smp_init(void)
{
	atomic_t start_flag;

	(void)atomic_clear(&start_flag);

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
	z_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
			 smp_init_top, &start_flag);
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
	z_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
			 smp_init_top, &start_flag);
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
	z_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
			 smp_init_top, &start_flag);
#endif

	(void)atomic_set(&start_flag, 1);
}