zephyr/include/sys/atomic_c.h
Andy Ross 820c94e5dd arch/xtensa: Inline atomics
The xtensa atomics layer was written with hand-coded assembly that had
to be called as functions.  That's needlessly slow, given that the low
level primitives are a two-instruction sequence.  Ideally the compiler
should see this as an inline to permit it to better optimize around
the needed barriers.
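
For reference, the primitive in question is an SCOMPARE1 write followed
by an S32C1I conditional store.  A sketch in GCC inline assembly (the
function name is illustrative, not necessarily what the patch adds):

    static inline atomic_val_t xtensa_cas(atomic_t *addr,
                                          atomic_val_t oldval,
                                          atomic_val_t newval)
    {
            /* S32C1I compares *addr with the SCOMPARE1 special
             * register and stores newval only on a match; either way
             * the register operand gets the prior memory contents.
             */
            __asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
                             : "+r"(newval)
                             : "r"(oldval), "r"(addr)
                             : "memory");

            return newval; /* old value; == oldval iff swap happened */
    }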

There was also a bug with the atomic_cas function, which had a loop
internally instead of returning the old value synchronously on a
failed swap.  That's benign right now because our existing spin lock
does nothing but retry it in a tight loop anyway, but it's incorrect
per spec and would have caused a contention hang with more elaborate
algorithms (for example a spinlock with backoff semantics).
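
For example, a hypothetical spinlock with exponential backoff (a sketch
only, not a real Zephyr API) depends on that synchronous failure:

    void backoff_lock(atomic_t *lock)
    {
            unsigned int delay = 1;

            /* Each failed CAS must return immediately so the caller
             * can back off; a CAS that retries internally would spin
             * at full speed and reintroduce the hang described above.
             */
            while (!atomic_cas(lock, 0, 1)) {
                    for (volatile unsigned int i = 0; i < delay; i++) {
                    }
                    delay = (delay < 1024U) ? 2U * delay : delay;
            }
    }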

Remove the old implementation and replace with a much smaller inline C
one based on just two assembly primitives.

This patch also contains a little bit of refactoring of the atomics
headers: each implementation scheme has been split out into a separate
header, and the ATOMIC_OPERATIONS_CUSTOM kconfig has been renamed to
ATOMIC_OPERATIONS_ARCH to better capture what it means.
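
As a rough illustration of the layout (the per-scheme header names are
assumed for this sketch; only the Kconfig symbols come from the patch),
the top-level header can now pick an implementation like so:

    /* hypothetical excerpt of <sys/atomic.h> after the split */
    #if defined(CONFIG_ATOMIC_OPERATIONS_ARCH)
    #include <sys/atomic_arch.h>
    #elif defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN)
    #include <sys/atomic_builtin.h>
    #elif defined(CONFIG_ATOMIC_OPERATIONS_C)
    #include <sys/atomic_c.h>
    #endif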

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2021-03-08 11:14:27 -05:00

/*
 * Copyright (c) 1997-2015, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_
#define ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_

/* Included from <atomic.h> */

#ifdef __cplusplus
extern "C" {
#endif

/* Simple and correct (but very slow) implementation of atomic
 * primitives that require nothing more than kernel interrupt locking.
 */

__syscall bool atomic_cas(atomic_t *target, atomic_val_t old_value,
			  atomic_val_t new_value);

__syscall bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
			      void *new_value);

__syscall atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);

__syscall atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
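
/* For exposition only: a hedged sketch of how the C backend might
 * implement one of these operations with a single global spinlock.
 * The real out-of-line definitions live elsewhere (e.g. in
 * kernel/atomic_c.c); names below are illustrative, and the block is
 * compiled out so the header's behavior is unchanged.
 */
#if 0
static struct k_spinlock sketch_lock;

atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
	k_spinlock_key_t key = k_spin_lock(&sketch_lock);
	atomic_val_t ret = *target;

	*target += value;
	k_spin_unlock(&sketch_lock, key);

	return ret;
}
#endif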

static inline atomic_val_t atomic_inc(atomic_t *target)
{
	return atomic_add(target, 1);
}

static inline atomic_val_t atomic_dec(atomic_t *target)
{
	return atomic_sub(target, 1);
}

extern atomic_val_t atomic_get(const atomic_t *target);

extern void *atomic_ptr_get(const atomic_ptr_t *target);

__syscall atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);

__syscall void *atomic_ptr_set(atomic_ptr_t *target, void *value);

static inline atomic_val_t atomic_clear(atomic_t *target)
{
	return atomic_set(target, 0);
}

static inline void *atomic_ptr_clear(atomic_ptr_t *target)
{
	return atomic_ptr_set(target, NULL);
}

__syscall atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);

__syscall atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);

__syscall atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);

__syscall atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);

#ifdef __cplusplus
}
#endif

#ifdef CONFIG_ATOMIC_OPERATIONS_C
#include <syscalls/atomic_c.h>
#endif

#endif /* ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_ */