Making a reference to the common workqueue code should not necessarily drag in the system workqueue, since it is possible to use a workqueue that is not the system workqueue. Move the system workqueue into its own code module: this allows removing the NANO_WORKQUEUE and SYSTEM_WORKQUEUE kconfig options and compiling the common workqueue code and the system workqueue unconditionally. They are only linked into the final image if a reference to them exists, the same as the other kernel modules.

Change-Id: I6f48d2542bda24f4702e7c2e317818dd082b3c11
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
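As a rough illustration of the point above (using a workqueue that is not the system workqueue), here is a minimal sketch built on the API implemented in this file; the names my_work_q, my_work_q_stack, my_work and my_work_handler, as well as the stack size and priority, are hypothetical and not part of this change:

#include <kernel.h>

/* Sketch: a private workqueue, started and used independently of the
 * system workqueue. Object names, stack size and priority are
 * illustrative; a real stack may need platform-specific alignment or
 * declaration attributes.
 */
static struct k_work_q my_work_q;
static char my_work_q_stack[512];
static struct k_work my_work;

static void my_work_handler(struct k_work *item)
{
	/* Deferred processing runs in the my_work_q thread */
}

void my_module_init(void)
{
	k_work_init(&my_work, my_work_handler);

	/* Spawns the workqueue thread; see k_work_q_start() below */
	k_work_q_start(&my_work_q, my_work_q_stack,
		       sizeof(my_work_q_stack), 5 /* thread priority */);

	/* Queue the item; my_work_handler() runs in the workqueue thread */
	k_work_submit_to_queue(&my_work_q, &my_work);
}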
149 lines
3.2 KiB
C
/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 *
 * Workqueue support functions
 */

#include <nano_private.h>
#include <wait_q.h>
#include <errno.h>

static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		struct k_work *work;
		k_work_handler_t handler;

		work = k_fifo_get(&work_q->fifo, K_FOREVER);

		handler = work->handler;

		/* Reset pending state so it can be resubmitted by handler */
		if (atomic_test_and_clear_bit(work->flags,
					      K_WORK_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog the CPU if the FIFO never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

void k_work_q_start(struct k_work_q *work_q, char *stack,
		    unsigned stack_size, unsigned prio)
{
	k_fifo_init(&work_q->fifo);

	k_thread_spawn(stack, stack_size,
		       work_q_main, work_q, 0, 0,
		       prio, 0, 0);
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
						timeout);

	/* Submit work to the attached workqueue */
	k_work_submit_to_queue(w->work_q, &w->work);
}

void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	k_work_init(&work->work, handler);
	_init_timeout(&work->timeout, work_timeout);
	work->work_q = NULL;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   int32_t delay)
{
	int key = irq_lock();
	int err;

	/* Work cannot be active in multiple queues */
	if (work->work_q && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = k_delayed_work_cancel(work);
		if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	if (!delay) {
		/* Submit work directly if the delay is zero */
		k_work_submit_to_queue(work_q, &work->work);
	} else {
		/* Add timeout */
		_add_timeout(NULL, &work->timeout, NULL,
			     _TICK_ALIGN + _ms_to_ticks(delay));
	}

	err = 0;

done:
	irq_unlock(key);

	return err;
}

int k_delayed_work_cancel(struct k_delayed_work *work)
{
	int key = irq_lock();

	if (k_work_pending(&work->work)) {
		irq_unlock(key);
		return -EINPROGRESS;
	}

	if (!work->work_q) {
		irq_unlock(key);
		return -EINVAL;
	}

	/* Abort timeout; if it has already expired this will do nothing */
	_abort_timeout(&work->timeout);

	/* Detach from workqueue */
	work->work_q = NULL;

	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
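For the delayed-work path above (only built when CONFIG_SYS_CLOCK_EXISTS is set), a similarly hedged usage sketch; my_dwork, my_dwork_handler, my_delayed_submit and the 100 ms delay are hypothetical, and the queue passed in is assumed to have been started with k_work_q_start():

#include <kernel.h>

/* Sketch: submitting a work item to a caller-supplied workqueue after a
 * delay. Names and the 100 ms delay are illustrative only.
 */
static struct k_delayed_work my_dwork;

static void my_dwork_handler(struct k_work *item)
{
	/* Runs in the workqueue thread roughly 100 ms after submission */
}

int my_delayed_submit(struct k_work_q *queue)
{
	k_delayed_work_init(&my_dwork, my_dwork_handler);

	/* Returns -EADDRINUSE if the item is active on another queue, or
	 * -EINPROGRESS if it is already pending and cannot be cancelled.
	 */
	return k_delayed_work_submit_to_queue(queue, &my_dwork, 100);
}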