Update reserved function names starting with one underscore, replacing
them as follows:

  '_k_'       with 'z_'
  '_K_'       with 'Z_'
  '_handler_' with 'z_handl_'
  '_Cstart'   with 'z_cstart'
  '_Swap'     with 'z_swap'

This renaming is done on both global and static function names in
kernel/include and include/. Other static function names in kernel/ are
renamed by removing the leading underscore. Remaining function names not
starting with any prefix listed above are renamed with a 'z_' or 'Z_'
prefix. Function names starting with two or three leading underscores
are not automatically renamed, since the renamed variants would collide
with the existing names that have two or three leading underscores.

Various generator scripts have also been updated, as well as perf,
linker and usb files. These are:

  drivers/serial/uart_handlers.c
  include/linker/kobject-text.ld
  kernel/include/syscall_handler.h
  scripts/gen_kobject_list.py
  scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief fixed-size stack object
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <ksched.h>
#include <wait_q.h>
#include <misc/__assert.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>

extern struct k_stack _k_stack_list_start[];
extern struct k_stack _k_stack_list_end[];
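
/*
 * _k_stack_list_start and _k_stack_list_end are linker-provided symbols
 * bounding the section that collects every statically defined k_stack,
 * so the tracing hook below can walk all of them at boot. A minimal
 * sketch of how an application defines such a static stack (application
 * code with a made-up name, not part of this file):
 *
 *     K_STACK_DEFINE(my_stack, 8);    // room for 8 u32_t entries
 *
 * K_STACK_DEFINE() places the object in the section delimited by the
 * two symbols above and statically initializes it.
 */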
#ifdef CONFIG_OBJECT_TRACING

struct k_stack *_trace_list_k_stack;

/*
 * Complete initialization of statically defined stacks.
 */
static int init_stack_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_stack *stack;

	for (stack = _k_stack_list_start; stack < _k_stack_list_end; stack++) {
		SYS_TRACING_OBJ_INIT(k_stack, stack);
	}
	return 0;
}

SYS_INIT(init_stack_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
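
/*
 * Initialize a stack object over a caller-supplied buffer of
 * 'num_entries' u32_t words. 'next' is the insertion point: the object
 * is empty when next == base and full when next == top. A minimal usage
 * sketch, assuming an application-side buffer (hypothetical names, not
 * part of this file):
 *
 *     u32_t my_buf[8];
 *     struct k_stack my_stack;
 *
 *     k_stack_init(&my_stack, my_buf, 8);
 */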
void k_stack_init(struct k_stack *stack, u32_t *buffer,
		  u32_t num_entries)
{
	z_waitq_init(&stack->wait_q);
	stack->lock = (struct k_spinlock) {};
	stack->next = stack->base = buffer;
	stack->top = stack->base + num_entries;

	SYS_TRACING_OBJ_INIT(k_stack, stack);
	z_object_init(stack);
}
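
/*
 * As k_stack_init(), but the buffer comes from the calling thread's
 * resource pool instead of being supplied by the caller. The
 * K_STACK_FLAG_ALLOC flag records that the buffer must be freed again
 * in k_stack_cleanup(). Returns 0 on success or -ENOMEM if the pool
 * allocation fails.
 */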
s32_t z_impl_k_stack_alloc_init(struct k_stack *stack, u32_t num_entries)
{
	void *buffer;
	s32_t ret;

	/* The buffer must hold num_entries words, not num_entries bytes */
	buffer = z_thread_malloc(num_entries * sizeof(u32_t));
	if (buffer != NULL) {
		k_stack_init(stack, buffer, num_entries);
		stack->flags = K_STACK_FLAG_ALLOC;
		ret = (s32_t)0;
	} else {
		ret = -ENOMEM;
	}

	return ret;
}
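
/*
 * Syscall handler: validate the user-supplied arguments before invoking
 * the implementation above. Z_SYSCALL_OBJ_NEVER_INIT() checks that
 * 'stack' refers to a kernel object of type K_OBJ_STACK that has not
 * yet been initialized, and Z_OOPS() terminates the calling thread if
 * a check fails.
 */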
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_stack_alloc_init, stack, num_entries)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
	Z_OOPS(Z_SYSCALL_VERIFY(num_entries > 0));

	return z_impl_k_stack_alloc_init((struct k_stack *)stack, num_entries);
}
#endif
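
/*
 * Release the buffer of a stack created with k_stack_alloc_init().
 * May only be called once no thread is pending on the object; stacks
 * initialized with a caller-supplied buffer are left untouched.
 */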
void k_stack_cleanup(struct k_stack *stack)
{
	__ASSERT_NO_MSG(z_waitq_head(&stack->wait_q) == NULL);

	if ((stack->flags & K_STACK_FLAG_ALLOC) != (u8_t)0) {
		k_free(stack->base);
		stack->base = NULL;
		stack->flags &= ~K_STACK_FLAG_ALLOC;
	}
}
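
/*
 * Push a word onto the stack. If a thread is already waiting in
 * k_stack_pop(), the value bypasses the buffer entirely: it is handed
 * to the waiter through its swap_data slot and the waiter is made
 * ready, which may trigger an immediate reschedule. Otherwise the
 * value is stored at 'next' and the insertion point advances.
 */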
void z_impl_k_stack_push(struct k_stack *stack, u32_t data)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key;

	__ASSERT(stack->next != stack->top, "stack is full");

	key = k_spin_lock(&stack->lock);

	first_pending_thread = z_unpend_first_thread(&stack->wait_q);

	if (first_pending_thread != NULL) {
		z_ready_thread(first_pending_thread);

		z_set_thread_return_value_with_data(first_pending_thread,
						    0, (void *)data);
		z_reschedule(&stack->lock, key);
		return;
	}

	*(stack->next) = data;
	stack->next++;
	k_spin_unlock(&stack->lock, key);
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_stack_push, stack_p, data)
{
	struct k_stack *stack = (struct k_stack *)stack_p;

	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(stack->next != stack->top,
				    "stack is full"));

	z_impl_k_stack_push(stack, data);
	return 0;
}
#endif
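
/*
 * Pop a word from the stack into *data. Three outcomes are possible:
 * the stack is non-empty and the top word is returned immediately; the
 * stack is empty and 'timeout' is K_NO_WAIT, giving -EBUSY; or the
 * caller pends on the wait queue, returning -EAGAIN on timeout or 0
 * with the value delivered through swap_data by a later k_stack_push().
 */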
int z_impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
{
	k_spinlock_key_t key;
	int result;

	key = k_spin_lock(&stack->lock);

	if (likely(stack->next > stack->base)) {
		stack->next--;
		*data = *(stack->next);
		k_spin_unlock(&stack->lock, key);
		return 0;
	}

	if (timeout == K_NO_WAIT) {
		k_spin_unlock(&stack->lock, key);
		return -EBUSY;
	}

	result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout);
	if (result == -EAGAIN) {
		return -EAGAIN;
	}

	*data = (u32_t)_current->base.swap_data;
	return 0;
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_stack_pop, stack, data, timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(u32_t)));

	return z_impl_k_stack_pop((struct k_stack *)stack, (u32_t *)data,
				  timeout);
}
#endif
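
/*
 * End-to-end usage sketch (hypothetical application code with made-up
 * names; assumes the stack was defined with K_STACK_DEFINE(my_stack, 8)
 * as shown near the top of this file):
 *
 *     u32_t item;
 *
 *     // producer thread
 *     k_stack_push(&my_stack, 0xb105f00d);
 *
 *     // consumer thread: block up to 100 ms waiting for an entry
 *     if (k_stack_pop(&my_stack, &item, 100) == 0) {
 *         // item now holds the pushed word
 *     }
 */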