Zephyr API Documentation 4.1.99
A Scalable Open Source RTOS
arch.h
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>

/* Stacks: on RISC-V, the stack pointer must be 16-byte aligned. */
#define ARCH_STACK_PTR_ALIGN 16

#define Z_RISCV_STACK_PMP_ALIGN \
	MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The stack guard is an area at the bottom of the kernel-mode stack
 * made to fault when accessed. It does _not_ fault in exception mode,
 * as we rely on that area to save the exception stack frame and to
 * process said fault. The guard area must therefore be large enough to
 * hold the esf, plus some configurable stack wiggle room for the fault
 * handling code to execute off of, as well as some guard size to cover
 * possible sudden stack-pointer displacement before the fault.
 */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
	Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif
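
/*
 * Worked example (the figures are illustrative assumptions, not values
 * from this file): with sizeof(struct arch_esf) == 124,
 * CONFIG_PMP_STACK_GUARD_MIN_SIZE == 1024 and CONFIG_PMP_GRANULARITY == 64:
 *
 *   Z_RISCV_STACK_PMP_ALIGN  = MAX(64, 16) = 64
 *
 * with CONFIG_PMP_POWER_OF_TWO_ALIGNMENT (NAPOT-style PMP entries):
 *   Z_RISCV_STACK_GUARD_SIZE = Z_POW2_CEIL(MAX(124 + 1024, 64)) = 2048
 *
 * without it:
 *   Z_RISCV_STACK_GUARD_SIZE = ROUND_UP(124 + 1024, 64) = 1152
 */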

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will be initially (or potentially only) used by kernel
 * mode so we need to make room for a possible stack guard area when enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)

#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
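
/*
 * Worked example (illustrative assumptions: CONFIG_PRIVILEGED_STACK_SIZE
 * == 1024, Z_RISCV_STACK_PMP_ALIGN == 64, requested stack size == 1000):
 *
 * with CONFIG_PMP_POWER_OF_TWO_ALIGNMENT:
 *   ARCH_THREAD_STACK_SIZE_ADJUST(1000)
 *     = Z_POW2_CEIL(MAX(MAX(1000, 1024), 64)) = 1024
 *   and the stack object is aligned to that same power-of-two size,
 *   as a NAPOT PMP region requires.
 *
 * without it:
 *   ARCH_THREAD_STACK_SIZE_ADJUST(1000) = ROUND_UP(1000, 64) = 1024
 *   ARCH_THREAD_STACK_RESERVED
 *     = ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + 1024, 64)
 *   i.e. the guard and privilege stack are carved out of the object
 *   ahead of thread.stack_info.start, as drawn above.
 */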

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
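
/*
 * RV_REGSHIFT is log2(RV_REGSIZE): shifting a register index left by it
 * turns the index into a byte offset. A hypothetical helper, shown only
 * to illustrate the relationship (not part of this file):
 *
 *   #define REG_BYTE_OFFSET(n) ((n) << RV_REGSHIFT)   // == n * RV_REGSIZE
 */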

/* Common mstatus bits. All supported cores today have the same layout. */

#define MSTATUS_IEN (1UL << 3)
#define MSTATUS_MPP_M (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF (0UL << 13)
#define MSTATUS_FS_INIT (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
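
/*
 * A minimal sketch of how this mask is meant to be used (illustrative;
 * the esf field name is an assumption, not code from this file): seed a
 * new thread's saved mstatus so that mret enters it in M-mode with
 * interrupts enabled.
 *
 *   esf->mstatus = MSTATUS_DEF_RESTORE;  // MPP = M-mode, MPIE = 1
 *   // on mret, MPIE is copied into MIE, re-enabling interrupts
 */
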
#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_X})

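/*
 * A minimal usage sketch (application side; assumes the generic
 * K_MEM_PARTITION_DEFINE() helper from the memory domain API):
 *
 *   static uint8_t app_buf[256] __aligned(256);
 *   K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
 *                          K_MEM_PARTITION_P_RW_U_RW);
 *
 * The partition's start and size must be representable at the PMP
 * granularity for the attribute to take effect.
 */
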
/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
	unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock out global IRQs:
 * csrrc atomically reads a CSR and clears the requested bits.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");

	return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global IRQs:
 * csrs atomically sets the requested bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif
}
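
/*
 * Typical usage (illustration only; portable code should normally go
 * through irq_lock()/irq_unlock() rather than the arch layer directly):
 *
 *   unsigned int key = arch_irq_lock();
 *   // ... critical section, mstatus.MIE cleared ...
 *   if (arch_irq_unlocked(key)) {
 *       // interrupts were enabled on entry and will be restored
 *   }
 *   arch_irq_unlock(key);
 */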

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
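
/*
 * A minimal sketch (do_work() is a hypothetical placeholder): timing an
 * interval with the 32-bit cycle counter; the unsigned subtraction stays
 * correct across a single counter wrap.
 *
 *   uint32_t start = arch_k_cycle_get_32();
 *   do_work();
 *   uint32_t elapsed = arch_k_cycle_get_32() - start;
 */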

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_RISCV_PRIVILEGED)
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */