Mirror of https://github.com/zephyrproject-rtos/zephyr, synced 2025-09-04 09:01:57 +00:00
The scheduler needs a few tweaks to work in SMP mode:

1. The "cache" field just doesn't work. With more than one CPU, caching the highest-priority thread isn't useful, as you may need N of them at any given time before another thread is returned to the scheduler. You could recalculate it at every change, but that provides no performance benefit. Remove.

2. The "bitmask" designed to prevent the need to individually check priorities is likewise dropped. This could work, but in fact on our only current SMP system and with current K_NUM_PRIORITIES values it provides no real benefit.

3. Individual threads now have a "current cpu" and an "active" flag, so that the choice of the next thread to run can correctly skip threads that are active on other CPUs.

The upshot is that a decent amount of code gets #if'd out, and the new SMP implementations of _get_highest_ready_prio() and _get_next_ready_thread() are simpler and smaller, at the expense of dropping older optimizations; a sketch of the idea follows below.

Note that scheduler synchronization is unchanged: all scheduler APIs used to require that an irq_lock() be held, which means they now require the global spinlock via the same API. This should be a very early candidate for lock-granularity attention!

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
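A minimal sketch of the selection logic described above, assuming a simple linked-list ready queue and illustrative type and field names (active, cpu, prio); the actual kernel data structures and function names differ:

/* Stand-in types for illustration only; the real definitions live in
 * kernel_structs.h and differ in detail.
 */
struct thread_base_sketch {
	int prio;             /* lower value == higher priority */
	int cpu;              /* CPU this thread is (or was last) running on */
	unsigned char active; /* non-zero while running on some CPU */
};

struct thread_sketch {
	struct thread_base_sketch base;
	struct thread_sketch *next;   /* ready-queue link */
};

/* Pick the best ready thread that is not already active on another CPU.
 * This replaces the cached "highest priority thread" used in the
 * uniprocessor scheduler.
 */
static struct thread_sketch *next_ready_thread(struct thread_sketch *ready_head,
					       int this_cpu)
{
	struct thread_sketch *best = NULL;

	for (struct thread_sketch *t = ready_head; t != NULL; t = t->next) {
		if (t->base.active && t->base.cpu != this_cpu) {
			continue; /* running elsewhere; skip it */
		}
		if (best == NULL || t->base.prio < best->base.prio) {
			best = t;
		}
	}
	return best;
}

The key difference from the uniprocessor path is the active/cpu check: a thread already running on another CPU must never be handed out a second time, which is why a single cached "best thread" field no longer works.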
88 lines
2.1 KiB
C
/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <device.h>

#ifndef _kernel_offsets__h_
#define _kernel_offsets__h_

#include <syscall_list.h>

/*
 * The final link step uses the symbol _OffsetAbsSyms to force the linkage of
 * offsets.o into the ELF image.
 */

GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)

#ifndef CONFIG_SMP
GEN_OFFSET_SYM(_kernel_t, current);
GEN_OFFSET_SYM(_kernel_t, nested);
GEN_OFFSET_SYM(_kernel_t, irq_stack);
#endif

GEN_OFFSET_SYM(_cpu_t, current);
GEN_OFFSET_SYM(_cpu_t, nested);
GEN_OFFSET_SYM(_cpu_t, irq_stack);

#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(_kernel_t, threads);
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
GEN_OFFSET_SYM(_kernel_t, idle);
#endif

GEN_OFFSET_SYM(_kernel_t, ready_q);
GEN_OFFSET_SYM(_kernel_t, arch);

#ifndef CONFIG_SMP
GEN_OFFSET_SYM(_ready_q_t, cache);
#endif

#ifdef CONFIG_FP_SHARING
GEN_OFFSET_SYM(_kernel_t, current_fp);
#endif

GEN_ABSOLUTE_SYM(_STRUCT_KERNEL_SIZE, sizeof(struct _kernel));

GEN_OFFSET_SYM(_thread_base_t, user_options);
GEN_OFFSET_SYM(_thread_base_t, thread_state);
GEN_OFFSET_SYM(_thread_base_t, prio);
GEN_OFFSET_SYM(_thread_base_t, sched_locked);
GEN_OFFSET_SYM(_thread_base_t, preempt);
GEN_OFFSET_SYM(_thread_base_t, swap_data);

GEN_OFFSET_SYM(_thread_t, base);
GEN_OFFSET_SYM(_thread_t, caller_saved);
GEN_OFFSET_SYM(_thread_t, callee_saved);
GEN_OFFSET_SYM(_thread_t, arch);

#ifdef CONFIG_THREAD_STACK_INFO
GEN_OFFSET_SYM(_thread_stack_info_t, start);
GEN_OFFSET_SYM(_thread_stack_info_t, size);

GEN_OFFSET_SYM(_thread_t, stack_info);
#endif

#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(_thread_t, next_thread);
#endif

#ifdef CONFIG_THREAD_CUSTOM_DATA
GEN_OFFSET_SYM(_thread_t, custom_data);
#endif

GEN_ABSOLUTE_SYM(K_THREAD_SIZEOF, sizeof(struct k_thread));

/* size of the device structure. Used by linker scripts */
GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_SIZE, sizeof(struct device));

/* Access to enum values in asm code */
GEN_ABSOLUTE_SYM(_SYSCALL_LIMIT, K_SYSCALL_LIMIT);
GEN_ABSOLUTE_SYM(_SYSCALL_BAD, K_SYSCALL_BAD);

#endif /* _kernel_offsets__h_ */
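For context on how these generated symbols are consumed: each GEN_OFFSET_SYM()/GEN_ABSOLUTE_SYM() publishes a compile-time constant (typically an offsetof() or sizeof() value) as an absolute symbol, so that assembly code and linker scripts can refer to struct members and sizes by name. The sketch below illustrates the general mechanism with a simplified, GCC-style macro; it is an assumption for illustration, not Zephyr's actual gen_offset.h definition.

#include <stddef.h>

/* Simplified stand-in for GEN_ABSOLUTE_SYM(): emit "value" as a global
 * absolute assembler symbol named "name".  The real macro is
 * toolchain-specific.
 */
#define GEN_ABSOLUTE_SYM_SKETCH(name, value) \
	__asm__(".globl " #name "\n\t.equ " #name ", %c0" : : "n"(value))

struct example_thread {
	int prio;
	void *stack_ptr;
};

/* Plays the role of the GEN_ABS_SYM_BEGIN(_OffsetAbsSyms) block: the asm
 * statements must sit inside a function so the compiler emits them, and
 * that function symbol is what the final link step references to pull
 * offsets.o into the image.
 */
void offset_abs_syms_sketch(void)
{
	GEN_ABSOLUTE_SYM_SKETCH(__example_thread_stack_ptr_OFFSET,
				offsetof(struct example_thread, stack_ptr));
	GEN_ABSOLUTE_SYM_SKETCH(__example_thread_SIZEOF,
				sizeof(struct example_thread));
}

From there, the build can let assembly reference the absolute symbol directly or post-process the object's symbol table into a header of numeric #defines; either way, .S files and linker scripts get stable names for struct offsets and sizes without duplicating the C layout by hand.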