Mirror of https://github.com/zephyrproject-rtos/zephyr (synced 2025-08-05 14:54:32 +00:00)
uart: Fix uart_irq_callback_user_data_set usage
Now providing the struct device * to the callback.

Fixes #26923

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
commit 701869fc48 (parent 04d6d0b120)
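Every hunk below applies the same pattern: the interrupt callback registered with uart_irq_callback_user_data_set() now receives the UART device as its first argument, so handlers no longer need to recover a struct device pointer from their user data (or from a cached struct field or global). A minimal sketch of the updated usage, assuming the Zephyr 2.x-era interrupt-driven UART API of this commit; the my_ctx, my_uart_isr and my_uart_init names are illustrative only and are not part of the patch:

#include <zephyr.h>
#include <drivers/uart.h>

/* Illustrative receive context; not part of the commit. */
struct my_ctx {
        uint8_t buf[64];
};

/*
 * New callback signature: the driver now passes the UART device in
 * directly, so nothing device-related has to be stashed in user_data.
 */
static void my_uart_isr(struct device *dev, void *user_data)
{
        struct my_ctx *ctx = user_data;

        while (uart_irq_update(dev) && uart_irq_rx_ready(dev)) {
                int rx = uart_fifo_read(dev, ctx->buf, sizeof(ctx->buf));

                if (rx <= 0) {
                        break;
                }

                /* ... hand ctx->buf[0..rx) to the application ... */
        }
}

static void my_uart_init(struct device *uart_dev, struct my_ctx *ctx)
{
        /* Register the callback and opaque context, then enable RX IRQs. */
        uart_irq_callback_user_data_set(uart_dev, my_uart_isr, ctx);
        uart_irq_rx_enable(uart_dev);
}

The user_data argument is unchanged; only the extra device parameter is new, which is why most hunks below amount to a one-line signature change plus removal of a locally cached device pointer.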
@@ -243,16 +243,16 @@ static int uart_mux_init(struct device *dev)
  * data from it in uart_mux_rx_work(), we push the data to GSM mux API which
  * will call proper callbacks to pass data to correct recipient.
  */
-static void uart_mux_isr(void *user_data)
+static void uart_mux_isr(struct device *uart, void *user_data)
 {
         struct uart_mux *real_uart = user_data;
         int rx = 0;
         size_t wrote = 0;
 
         /* Read all data off UART, and send to RX worker for unmuxing */
-        while (uart_irq_update(real_uart->uart) &&
-               uart_irq_rx_ready(real_uart->uart)) {
-                rx = uart_fifo_read(real_uart->uart, real_uart->rx_buf,
+        while (uart_irq_update(uart) &&
+               uart_irq_rx_ready(uart)) {
+                rx = uart_fifo_read(uart, real_uart->rx_buf,
                                     sizeof(real_uart->rx_buf));
                 if (rx <= 0) {
                         continue;
@@ -784,10 +784,9 @@ static void ppp_uart_flush(struct device *dev)
         }
 }
 
-static void ppp_uart_isr(void *user_data)
+static void ppp_uart_isr(struct device *uart, void *user_data)
 {
         struct ppp_driver_context *context = user_data;
-        struct device *uart = context->dev;
         int rx = 0, ret;
 
         /* get all of the data off UART as fast as we can */
@@ -29,17 +29,14 @@ uint8_t buffer0[RING_BUF_SIZE];
 uint8_t buffer1[RING_BUF_SIZE];
 
 static struct serial_data {
-        struct device *dev;
         struct device *peer;
         struct serial_data *peer_data;
         struct ring_buf ringbuf;
 } peers[2];
 
-static void interrupt_handler(void *user_data)
+static void interrupt_handler(struct device *dev, void *user_data)
 {
         struct serial_data *dev_data = user_data;
-        struct device *dev = dev_data->dev;
-
 
         while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
                 struct device *peer = dev_data->peer;
@@ -162,12 +159,10 @@ void main(void)
         uart_line_set(dev0);
         uart_line_set(dev1);
 
-        dev_data0->dev = dev0;
         dev_data0->peer = dev1;
         dev_data0->peer_data = dev_data1;
         ring_buf_init(&dev_data0->ringbuf, sizeof(buffer0), buffer0);
 
-        dev_data1->dev = dev1;
         dev_data1->peer = dev0;
         dev_data1->peer_data = dev_data0;
         ring_buf_init(&dev_data1->ringbuf, sizeof(buffer1), buffer1);
@@ -12,10 +12,9 @@
 static int tty_irq_input_hook(struct tty_serial *tty, uint8_t c);
 static int tty_putchar(struct tty_serial *tty, uint8_t c);
 
-static void tty_uart_isr(void *user_data)
+static void tty_uart_isr(struct device *dev, void *user_data)
 {
         struct tty_serial *tty = user_data;
-        struct device *dev = tty->uart_dev;
 
         uart_irq_update(dev);
 
@@ -49,7 +49,7 @@ static bool is_panic_mode;
 static const uint8_t *write_buffer;
 static uint16_t write_length;
 
-static void uart_rx_handle(void)
+static void uart_rx_handle(struct device *dev)
 {
         uint8_t *data;
         uint32_t len;
@@ -61,9 +61,7 @@ static void uart_rx_handle(void)
                                          ot_uart.rx_ringbuf, &data,
                                          ot_uart.rx_ringbuf->size);
                 if (len > 0) {
-                        rd_len = uart_fifo_read(
-                                ot_uart.dev, data, len);
-
+                        rd_len = uart_fifo_read(dev, data, len);
                         if (rd_len > 0) {
                                 new_data = true;
                         }
@@ -78,8 +76,7 @@ static void uart_rx_handle(void)
                         /* No space in the ring buffer - consume byte. */
                         LOG_WRN("RX ring buffer full.");
 
-                        rd_len = uart_fifo_read(
-                                ot_uart.dev, &dummy, 1);
+                        rd_len = uart_fifo_read(dev, &dummy, 1);
                 }
         } while (rd_len && (rd_len == len));
 
@@ -88,36 +85,34 @@ static void uart_rx_handle(void)
         }
 }
 
-static void uart_tx_handle(void)
+static void uart_tx_handle(struct device *dev)
 {
         uint32_t len;
 
         if (write_length) {
-                len = uart_fifo_fill(ot_uart.dev, write_buffer,
-                                     write_length);
+                len = uart_fifo_fill(dev, write_buffer, write_length);
                 write_buffer += len;
                 write_length -= len;
         } else {
-                uart_irq_tx_disable(ot_uart.dev);
+                uart_irq_tx_disable(dev);
                 ot_uart.tx_busy = 0;
                 atomic_set(&(ot_uart.tx_finished), 1);
                 otSysEventSignalPending();
         }
 }
 
-static void uart_callback(void *user_data)
+static void uart_callback(struct device *dev, void *user_data)
 {
         ARG_UNUSED(user_data);
 
-        while (uart_irq_update(ot_uart.dev) &&
-               uart_irq_is_pending(ot_uart.dev)) {
+        while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
 
-                if (uart_irq_rx_ready(ot_uart.dev)) {
-                        uart_rx_handle();
+                if (uart_irq_rx_ready(dev)) {
+                        uart_rx_handle(dev);
                 }
 
-                if (uart_irq_tx_ready(ot_uart.dev)) {
-                        uart_tx_handle();
+                if (uart_irq_tx_ready(dev)) {
+                        uart_tx_handle(dev);
                 }
         }
 }
@@ -182,10 +177,9 @@ otError otPlatUartEnable(void)
         }
 #endif
 
-        uart_irq_callback_user_data_set(
-                ot_uart.dev,
-                uart_callback,
-                (void *)&ot_uart);
+        uart_irq_callback_user_data_set(ot_uart.dev,
+                                        uart_callback,
+                                        (void *)&ot_uart);
         uart_irq_rx_enable(ot_uart.dev);
 
         return OT_ERROR_NONE;
@@ -27,7 +27,8 @@ SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
              SHELL_FLAG_OLF_CRLF);
 
 #ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
-static void uart_rx_handle(const struct shell_uart *sh_uart)
+static void uart_rx_handle(struct device *dev,
+                           const struct shell_uart *sh_uart)
 {
         uint8_t *data;
         uint32_t len;
@@ -39,8 +40,7 @@ static void uart_rx_handle(const struct shell_uart *sh_uart)
                                      sh_uart->rx_ringbuf->size);
 
         if (len > 0) {
-                rd_len = uart_fifo_read(sh_uart->ctrl_blk->dev,
-                                        data, len);
+                rd_len = uart_fifo_read(dev, data, len);
 #ifdef CONFIG_MCUMGR_SMP_SHELL
                 /* Divert bytes from shell handling if it is
                  * part of an mcumgr frame.
@@ -76,8 +76,7 @@ static void uart_rx_handle(const struct shell_uart *sh_uart)
                         /* No space in the ring buffer - consume byte. */
                         LOG_WRN("RX ring buffer full.");
 
-                        rd_len = uart_fifo_read(sh_uart->ctrl_blk->dev,
-                                                &dummy, 1);
+                        rd_len = uart_fifo_read(dev, &dummy, 1);
 #ifdef CONFIG_MCUMGR_SMP_SHELL
                         /* Divert this byte from shell handling if it
                          * is part of an mcumgr frame.
@@ -93,9 +92,8 @@ static void uart_rx_handle(const struct shell_uart *sh_uart)
         }
 }
 
-static void uart_tx_handle(const struct shell_uart *sh_uart)
+static void uart_tx_handle(struct device *dev, const struct shell_uart *sh_uart)
 {
-        struct device *dev = sh_uart->ctrl_blk->dev;
         uint32_t len;
         int err;
         const uint8_t *data;
@@ -115,19 +113,18 @@ static void uart_tx_handle(const struct shell_uart *sh_uart)
                                         sh_uart->ctrl_blk->context);
 }
 
-static void uart_callback(void *user_data)
+static void uart_callback(struct device *dev, void *user_data)
 {
         const struct shell_uart *sh_uart = (struct shell_uart *)user_data;
-        struct device *dev = sh_uart->ctrl_blk->dev;
 
         uart_irq_update(dev);
 
         if (uart_irq_rx_ready(dev)) {
-                uart_rx_handle(sh_uart);
+                uart_rx_handle(dev, sh_uart);
         }
 
         if (uart_irq_tx_ready(dev)) {
-                uart_tx_handle(sh_uart);
+                uart_tx_handle(dev, sh_uart);
         }
 }
 #endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */