zephyr/kernel/work_q.c
Andrew Boie 507852a4ad kernel: introduce opaque data type for stacks
Historically, stacks were just character buffers: they could be treated
as such if the user wanted to look inside the stack data, and they could
be declared as an array of the desired stack size.

This is no longer the case. Certain architectures will create a memory
region much larger to account for MPU/MMU guard pages. Unfortunately,
the kernel interfaces treat both the declared stack and the valid
stack buffer within it as the same char * data type, even though these
absolutely cannot be used interchangeably.

We introduce an opaque k_thread_stack_t which gets instantiated by
K_THREAD_STACK_DECLARE(); the compiler no longer treats it as a
character pointer, even though that is what it really is.

To access the real stack buffer within, use the result of
K_THREAD_STACK_BUFFER(), which returns a char * type.

This should catch a bunch of programming mistakes at build time:

- Declaring a character array outside of K_THREAD_STACK_DECLARE() and
  passing it to k_thread_create()
- Directly examining the stack created by K_THREAD_STACK_DECLARE(),
  which is not actually the memory desired and may trigger a CPU
  exception

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2017-08-01 16:43:15 -07:00
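
As a minimal sketch of the usage this commit describes (the names
MY_STACK_SIZE, my_stack, my_thread and my_entry are illustrative, not
part of the patch, and the priority value is arbitrary):

	#define MY_STACK_SIZE 1024

	K_THREAD_STACK_DECLARE(my_stack, MY_STACK_SIZE); /* opaque k_thread_stack_t */
	static struct k_thread my_thread;

	/* OK: the thread API now expects the opaque stack object */
	k_thread_create(&my_thread, my_stack, MY_STACK_SIZE, my_entry,
			NULL, NULL, NULL, 5, 0, 0);

	/* Build error: a plain char array no longer matches k_thread_stack_t:
	 *
	 *	char bad_stack[MY_STACK_SIZE];
	 *	k_thread_create(&my_thread, bad_stack, MY_STACK_SIZE, ...);
	 */

	/* To look inside the stack memory, go through the accessor */
	char *buf = K_THREAD_STACK_BUFFER(my_stack);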

/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Workqueue support functions
 */

#include <kernel_structs.h>
#include <wait_q.h>
#include <errno.h>
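
/* Entry point for a work queue's thread: pull work items off the queue's
 * FIFO and invoke each item's handler, yielding between items.
 */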
static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		struct k_work *work;
		k_work_handler_t handler;

		work = k_fifo_get(&work_q->fifo, K_FOREVER);
		handler = work->handler;

		/* Reset pending state so it can be resubmitted by handler */
		if (atomic_test_and_clear_bit(work->flags,
					      K_WORK_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog the CPU if the FIFO never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

void k_work_q_start(struct k_work_q *work_q, k_thread_stack_t stack,
		    size_t stack_size, int prio)
{
	k_fifo_init(&work_q->fifo);
	k_thread_create(&work_q->thread, stack, stack_size, work_q_main,
			work_q, 0, 0, prio, 0, 0);
}
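
/*
 * Illustrative usage sketch, not part of the original file: start a work
 * queue and submit an item to it. The names my_work_q, my_stack, my_work
 * and my_handler are hypothetical, and the stack size and priority are
 * arbitrary.
 *
 *	K_THREAD_STACK_DECLARE(my_stack, 1024);
 *	static struct k_work_q my_work_q;
 *	static struct k_work my_work;
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		(process the work item)
 *	}
 *
 *	k_work_init(&my_work, my_handler);
 *	k_work_q_start(&my_work_q, my_stack, 1024, 5);
 *	k_work_submit_to_queue(&my_work_q, &my_work);
 */
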
#ifdef CONFIG_SYS_CLOCK_EXISTS
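
/* Timeout expiry function: hand the delayed work item over to its
 * workqueue. This runs from the kernel's timeout handling, not from the
 * workqueue thread.
 */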
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
						timeout);

	/* submit work to workqueue */
	k_work_submit_to_queue(w->work_q, &w->work);

	/* detach from workqueue, for cancel to return appropriate status */
	w->work_q = NULL;
}

void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	k_work_init(&work->work, handler);
	_init_timeout(&work->timeout, work_timeout);
	work->work_q = NULL;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   s32_t delay)
{
	int key = irq_lock();
	int err;

	/* Work cannot be active in multiple queues */
	if (work->work_q && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = k_delayed_work_cancel(work);
		if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	if (!delay) {
		/* Submit work immediately if the delay is zero */
		k_work_submit_to_queue(work_q, &work->work);
	} else {
		/* Add timeout */
		_add_timeout(NULL, &work->timeout, NULL,
			     _TICK_ALIGN + _ms_to_ticks(delay));
	}

	err = 0;

done:
	irq_unlock(key);

	return err;
}

int k_delayed_work_cancel(struct k_delayed_work *work)
{
	int key = irq_lock();

	if (k_work_pending(&work->work)) {
		irq_unlock(key);
		return -EINPROGRESS;
	}

	if (!work->work_q) {
		irq_unlock(key);
		return -EINVAL;
	}

	/* Abort the timeout; if it has already expired this does nothing */
	_abort_timeout(&work->timeout);

	/* Detach from workqueue */
	work->work_q = NULL;

	irq_unlock(key);

	return 0;
}
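
/*
 * Illustrative usage sketch, not part of the original file: schedule a
 * handler on a queue after a delay, or cancel it before it fires. The
 * names my_work_q and my_handler are the hypothetical ones from the
 * sketch above, and the 100 ms delay is arbitrary.
 *
 *	static struct k_delayed_work my_dwork;
 *
 *	k_delayed_work_init(&my_dwork, my_handler);
 *
 *	(run my_handler on my_work_q after roughly 100 ms)
 *	k_delayed_work_submit_to_queue(&my_work_q, &my_dwork, 100);
 *
 *	(cancelling succeeds while the timeout is still pending; it fails
 *	with -EINPROGRESS once the item has been handed to the queue)
 *	k_delayed_work_cancel(&my_dwork);
 */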
#endif /* CONFIG_SYS_CLOCK_EXISTS */