mirror of
https://github.com/zephyrproject-rtos/zephyr
synced 2025-08-18 15:45:22 +00:00
Convert code to use u{8,16,32,64}_t and s{8,16,32,64}_t instead of C99 integer types. This handles the remaining includes and kernel, plus touching up various points that we skipped because of include dependencies. We also convert the PRI printf formatters in the arch code over to normal formatters.
Jira: ZEP-2051
Change-Id: Iecbb12601a3ee4ea936fd7ddea37788a645b08b0
Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
141 lines
2.7 KiB
C
/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Workqueue support functions
 */

#include <kernel_structs.h>
#include <wait_q.h>
#include <errno.h>

static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		struct k_work *work;
		k_work_handler_t handler;

		work = k_fifo_get(&work_q->fifo, K_FOREVER);

		handler = work->handler;

		/* Reset pending state so it can be resubmitted by handler */
		if (atomic_test_and_clear_bit(work->flags,
					      K_WORK_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog the CPU if the FIFO never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

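/*
 * Example: because the PENDING bit is cleared before the handler is
 * invoked, a handler is free to resubmit its own work item and have it
 * processed again on a later pass of the queue thread.  A hypothetical
 * handler might look like this (my_work_q, do_one_step() and
 * more_work_left() are placeholders supplied by the application, not
 * kernel APIs):
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		do_one_step();
 *		if (more_work_left()) {
 *			k_work_submit_to_queue(&my_work_q, work);
 *		}
 *	}
 */
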
void k_work_q_start(struct k_work_q *work_q, char *stack,
		    size_t stack_size, int prio)
{
	k_fifo_init(&work_q->fifo);

	k_thread_spawn(stack, stack_size,
		       work_q_main, work_q, 0, 0,
		       prio, 0, 0);
}

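/*
 * Example: a typical caller provides a dedicated stack area and a
 * handler, starts the queue once, then submits work items to it.  The
 * names and values below (my_work_q, my_stack, my_work, my_handler,
 * the 512-byte stack and priority 5) are placeholders chosen for
 * illustration; real code should declare and size the stack according
 * to the platform's conventions.
 *
 *	static struct k_work_q my_work_q;
 *	static char my_stack[512];
 *	static struct k_work my_work;
 *
 *	void start_my_work_q(void)
 *	{
 *		k_work_init(&my_work, my_handler);
 *		k_work_q_start(&my_work_q, my_stack, sizeof(my_stack), 5);
 *		k_work_submit_to_queue(&my_work_q, &my_work);
 *	}
 */
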
#ifdef CONFIG_SYS_CLOCK_EXISTS
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
						timeout);

	/* submit work to workqueue */
	k_work_submit_to_queue(w->work_q, &w->work);
	/* detach from workqueue, for cancel to return appropriate status */
	w->work_q = NULL;
}

void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	k_work_init(&work->work, handler);
	_init_timeout(&work->timeout, work_timeout);
	work->work_q = NULL;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   s32_t delay)
{
	int key = irq_lock();
	int err;

	/* Work cannot be active in multiple queues */
	if (work->work_q && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = k_delayed_work_cancel(work);
		if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	if (!delay) {
		/* Submit work immediately when the delay is zero */
		k_work_submit_to_queue(work_q, &work->work);
	} else {
		/* Add timeout */
		_add_timeout(NULL, &work->timeout, NULL,
			     _TICK_ALIGN + _ms_to_ticks(delay));
	}

	err = 0;

done:
	irq_unlock(key);

	return err;
}

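/*
 * Example: delayed work is initialized once, then submitted with a
 * delay in milliseconds; a zero delay queues the item immediately,
 * and k_delayed_work_cancel() removes it while the timeout is still
 * pending.  my_work_q, my_delayed_work and my_handler are placeholders
 * supplied by the caller:
 *
 *	static struct k_delayed_work my_delayed_work;
 *
 *	k_delayed_work_init(&my_delayed_work, my_handler);
 *	k_delayed_work_submit_to_queue(&my_work_q, &my_delayed_work, 100);
 *	...
 *	k_delayed_work_cancel(&my_delayed_work);
 */
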
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	int key = irq_lock();

	if (k_work_pending(&work->work)) {
		irq_unlock(key);
		return -EINPROGRESS;
	}

	if (!work->work_q) {
		irq_unlock(key);
		return -EINVAL;
	}

	/* Abort timeout, if it has expired this will do nothing */
	_abort_timeout(&work->timeout);

	/* Detach from workqueue */
	work->work_q = NULL;

	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */