Zephyr API Documentation 3.7.99
A Scalable Open Source RTOS
cache.h
/*
 * Copyright 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_

#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#include <zephyr/debug/sparse.h>
#include <xtensa/hal.h>

#ifdef __cplusplus
extern "C" {
#endif

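/*
 * A brief note on the macro below: Z_DCACHE_MAX is the size of a single
 * d-cache way in bytes (total size divided by the number of ways).  The
 * BUILD_ASSERTs that follow require both it and the line size to be powers
 * of two, so address arithmetic on lines and ways can use simple masks.
 */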
#define Z_DCACHE_MAX (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)

#if XCHAL_DCACHE_SIZE
BUILD_ASSERT(Z_IS_POW2(XCHAL_DCACHE_LINESIZE));
BUILD_ASSERT(Z_IS_POW2(Z_DCACHE_MAX));
#endif

#if defined(CONFIG_DCACHE) || defined(__DOXYGEN__)

/** Implementation of @ref arch_dcache_flush_range. */
static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwb %0, 0" :: "r"(line));
	}
#endif
	return 0;
}
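/*
 * A minimal usage sketch, assuming a hypothetical driver API: flush the
 * lines covering a buffer after the CPU writes it and before a bus master
 * reads it from RAM.  Portable code would normally go through
 * sys_cache_data_flush_range() rather than the arch_ function directly.
 *
 *	uint8_t tx_buf[256];
 *
 *	fill_tx_buf(tx_buf, sizeof(tx_buf));             // hypothetical producer
 *	arch_dcache_flush_range(tx_buf, sizeof(tx_buf)); // push data to RAM
 *	dma_start_tx(tx_buf, sizeof(tx_buf));            // hypothetical DMA start
 */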

/** Implementation of @ref arch_dcache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwbi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_invd_range. */
static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}
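/*
 * A companion sketch for the invalidate path, again with hypothetical
 * driver calls: after a bus master has written a buffer in RAM, invalidate
 * the covering lines so the CPU does not read stale cached data.
 *
 *	dma_wait_rx_done();                              // hypothetical completion wait
 *	arch_dcache_invd_range(rx_buf, sizeof(rx_buf));  // drop stale lines
 *	process_rx_buf(rx_buf, sizeof(rx_buf));          // hypothetical consumer
 */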

/** Implementation of @ref arch_dcache_invd_all. */
static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("dii %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_flush_all. */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("diwb %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("diwbi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}
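/*
 * Note on the instruction choice above: the *_range helpers use the "hit"
 * forms (dhwb, dhwbi, dhi), which act only on lines that currently hold
 * the given addresses, while the *_all helpers use the index forms
 * (dii, diwb, diwbi) and simply walk every line index in the cache.
 */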

/** Implementation of @ref arch_dcache_enable. */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
	/* nothing */
}

/** Implementation of @ref arch_dcache_disable. */
static ALWAYS_INLINE void arch_dcache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_DCACHE */

#if defined(CONFIG_ICACHE) || defined(__DOXYGEN__)

/** Implementation of @ref arch_icache_line_size_get. */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_flush_all. */
static ALWAYS_INLINE int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_invd_all. */
static ALWAYS_INLINE int arch_icache_invd_all(void)
{
#if XCHAL_ICACHE_SIZE
	xthal_icache_all_invalidate();
#endif
	return 0;
}

/** Implementation of @ref arch_icache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_flush_range. */
static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_invd_range. */
static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
#if XCHAL_ICACHE_SIZE
	xthal_icache_region_invalidate(addr, size);
#endif
	return 0;
}
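/*
 * Sketch of the usual reason to invalidate an i-cache range, with
 * hypothetical names: after copying or patching executable code in RAM,
 * flush the d-cache lines and invalidate the i-cache lines over that
 * region before jumping to it.
 *
 *	memcpy(code_dst, code_src, code_len);        // hypothetical code load
 *	arch_dcache_flush_range(code_dst, code_len); // data reaches RAM
 *	arch_icache_invd_range(code_dst, code_len);  // drop stale instructions
 *	((void (*)(void))code_dst)();                // run the fresh code
 */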

/** Implementation of @ref arch_icache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_enable. */
static ALWAYS_INLINE void arch_icache_enable(void)
{
	/* nothing */
}

/** Implementation of @ref arch_icache_disable. */
static ALWAYS_INLINE void arch_icache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_ICACHE */

#if defined(CONFIG_CACHE_DOUBLEMAP)
/**
 * @brief Test whether a pointer lies in the cached mapping.
 *
 * On parts that double-map memory as both a cached and an uncached
 * region, this checks the region bits of the address against
 * CONFIG_XTENSA_CACHED_REGION.
 */
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
}

/**
 * @brief Test whether a pointer lies in the uncached mapping.
 *
 * Same check as above, against CONFIG_XTENSA_UNCACHED_REGION.
 */
static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}

static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
	/* The math here is all compile-time: when the two regions
	 * differ by a power of two, we can convert between them by
	 * setting or clearing just one bit.  Otherwise it needs two
	 * operations.
	 */
	uint32_t rxor = (rto ^ rfrom) << 29;

	rto <<= 29;
	if (Z_IS_POW2(rxor)) {
		if ((rxor & rto) == 0) {
			return addr & ~rxor;
		} else {
			return addr | rxor;
		}
	} else {
		return (addr & ~(7U << 29)) | rto;
	}
}
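/*
 * Worked example with hypothetical region numbers: if the cached region
 * were 5 and the uncached region were 4, then rxor = (5 ^ 4) << 29 =
 * BIT(29), a power of two, so flipping a pointer is a single operation:
 * OR with BIT(29) toward region 5 (which has that bit set) or AND with
 * ~BIT(29) toward region 4.  If the regions were, say, 5 and 6, then
 * 5 ^ 6 = 3 is not a power of two, and the fallback path clears all three
 * region-select bits (31..29) and ORs in the target region instead.
 */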

/**
 * @brief Return the cached alias of a pointer.
 *
 * Converts the address to the CONFIG_XTENSA_CACHED_REGION mapping of the
 * same memory.
 */
static inline void __sparse_cache *arch_cache_cached_ptr_get(void *ptr)
{
	return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
								 CONFIG_XTENSA_CACHED_REGION,
								 CONFIG_XTENSA_UNCACHED_REGION);
}

/**
 * @brief Return the uncached alias of a pointer.
 *
 * Converts the address to the CONFIG_XTENSA_UNCACHED_REGION mapping of
 * the same memory.
 */
static inline void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
	return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
				   CONFIG_XTENSA_UNCACHED_REGION,
				   CONFIG_XTENSA_CACHED_REGION);
}
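/*
 * Usage sketch with hypothetical names: take a pointer to memory shared
 * with another bus master and obtain its uncached alias so that plain
 * loads and stores bypass the d-cache for that access.  Portable code
 * would normally use the corresponding sys_cache wrappers instead.
 *
 *	void __sparse_cache *c = arch_cache_cached_ptr_get(&ctl_mem);
 *	struct shared_ctl *u = arch_cache_uncached_ptr_get(c);
 *
 *	u->doorbell = 1;   // store goes straight to RAM, no flush needed
 */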
#else
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static inline void *arch_cache_cached_ptr_get(void *ptr)
{
	return ptr;
}

static inline void *arch_cache_uncached_ptr_get(void *ptr)
{
	return ptr;
}
#endif

static ALWAYS_INLINE void arch_cache_init(void)
{
}


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_ */