Zephyr API Documentation 4.1.99
A Scalable Open Source RTOS
rtio.h
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
38#include <zephyr/sys/util.h>
41
42#ifdef __cplusplus
43extern "C" {
44#endif
45
46
66#define RTIO_PRIO_LOW 0U
67
71#define RTIO_PRIO_NORM 127U
72
76#define RTIO_PRIO_HIGH 255U
77
97#define RTIO_SQE_CHAINED BIT(0)
98
109#define RTIO_SQE_TRANSACTION BIT(1)
110
111
121#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
122
129#define RTIO_SQE_CANCELED BIT(3)
130
137#define RTIO_SQE_MULTISHOT BIT(4)
138
142#define RTIO_SQE_NO_RESPONSE BIT(5)
143
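/*
 * Usage sketch (not part of rtio.h): SQE flags are set on acquired entries to
 * relate submissions to one another. Below, a one-byte register-address write
 * is chained to a one-byte read so the read starts only after the write
 * completes. `ctx` and `dev_iodev` are hypothetical placeholders.
 */
static inline void example_chained_write_read(struct rtio *ctx,
                                              const struct rtio_iodev *dev_iodev,
                                              const uint8_t *reg_addr, uint8_t *value)
{
        struct rtio_sqe *wr = rtio_sqe_acquire(ctx);
        struct rtio_sqe *rd = rtio_sqe_acquire(ctx);

        if (wr == NULL || rd == NULL) {
                rtio_sqe_drop_all(ctx);
                return;
        }

        rtio_sqe_prep_write(wr, dev_iodev, RTIO_PRIO_NORM, reg_addr, 1, NULL);
        wr->flags |= RTIO_SQE_CHAINED;
        rtio_sqe_prep_read(rd, dev_iodev, RTIO_PRIO_NORM, value, 1, NULL);
}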
161#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
162
163#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
164
171#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
172
179#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
180
188#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
189 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
190 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
191
199#define RTIO_IODEV_I2C_STOP BIT(1)
200
204#define RTIO_IODEV_I2C_RESTART BIT(2)
205
209#define RTIO_IODEV_I2C_10_BITS BIT(3)
210
214#define RTIO_IODEV_I3C_STOP BIT(1)
215
219#define RTIO_IODEV_I3C_RESTART BIT(2)
220
224#define RTIO_IODEV_I3C_HDR BIT(3)
225
229#define RTIO_IODEV_I3C_NBCH BIT(4)
230
234#define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
235
239#define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
240 FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
241
245#define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
246 FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
247
251#define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
252
256#define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
257 FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
258
262#define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
263 FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
264
266struct rtio;
267struct rtio_cqe;
268struct rtio_sqe;
269struct rtio_sqe_pool;
270struct rtio_cqe_pool;
271struct rtio_iodev;
272struct rtio_iodev_sqe;
282typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
283
290typedef void (*rtio_signaled_t)(struct rtio_iodev_sqe *iodev_sqe, void *userdata);
291
295struct rtio_sqe {
296 uint8_t op;
298 uint8_t prio;
300 uint16_t flags;
302 uint32_t iodev_flags;
304 const struct rtio_iodev *iodev;
313 void *userdata;
314
315 union {
316
318 struct {
319 uint32_t buf_len;
320 const uint8_t *buf;
321 } tx;
322
324 struct {
325 uint32_t buf_len;
326 uint8_t *buf;
327 } rx;
328
330 struct {
331 uint8_t buf_len;
332 uint8_t buf[7];
333 } tiny_tx;
334
336 struct {
337 rtio_callback_t callback;
338 void *arg0;
339 } callback;
340
342 struct {
343 uint32_t buf_len;
344 const uint8_t *tx_buf;
345 uint8_t *rx_buf;
346 } txrx;
347
349 struct {
350 k_timeout_t timeout;
351 struct _timeout to;
352 } delay;
353
355 uint32_t i2c_config;
356
358 struct {
359 /* enum i3c_config_type type; */
360 int type;
361 void *config;
362 } i3c_config;
363
365 /* struct i3c_ccc_payload *ccc_payload; */
366 void *ccc_payload;
367
369 struct {
370 atomic_t ok;
371 rtio_signaled_t callback;
372 void *userdata;
373 } await;
374 };
375};
376
378/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
379BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
385struct rtio_cqe {
386 struct mpsc_node q;
388 int32_t result;
389 void *userdata;
390 uint32_t flags;
391};
392
393struct rtio_sqe_pool {
394 struct mpsc free_q;
395 const uint16_t pool_size;
396 uint16_t pool_free;
397 struct rtio_iodev_sqe *pool;
398};
399
400struct rtio_cqe_pool {
401 struct mpsc free_q;
402 const uint16_t pool_size;
403 uint16_t pool_free;
404 struct rtio_cqe *pool;
405};
406
418struct rtio {
419#ifdef CONFIG_RTIO_SUBMIT_SEM
420 /* A wait semaphore which may suspend the calling thread
421 * to wait for some number of completions when calling submit
422 */
423 struct k_sem *submit_sem;
424
425 uint32_t submit_count;
426#endif
427
428#ifdef CONFIG_RTIO_CONSUME_SEM
429 /* A wait semaphore which may suspend the calling thread
430 * to wait for some number of completions while consuming
431 * them from the completion queue
432 */
433 struct k_sem *consume_sem;
434#endif
435
436 /* Total number of completions */
437 atomic_t cq_count;
438
439 /* Number of completions that were unable to be submitted with results
440 * due to the cq spsc being full
441 */
442 atomic_t xcqcnt;
443
444 /* Submission queue object pool with free list */
445 struct rtio_sqe_pool *sqe_pool;
446
447 /* Complete queue object pool with free list */
448 struct rtio_cqe_pool *cqe_pool;
449
450#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
451 /* Mem block pool */
452 struct sys_mem_blocks *block_pool;
453#endif
454
455 /* Submission queue */
456 struct mpsc sq;
457
458 /* Completion queue */
459 struct mpsc cq;
460};
461
463extern struct k_mem_partition rtio_partition;
464
472static inline size_t rtio_mempool_block_size(const struct rtio *r)
473{
474#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
475 ARG_UNUSED(r);
476 return 0;
477#else
478 if (r == NULL || r->block_pool == NULL) {
479 return 0;
480 }
481 return BIT(r->block_pool->info.blk_sz_shift);
482#endif
483}
484
492#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
493static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
494{
495 uintptr_t addr = (uintptr_t)ptr;
496 struct sys_mem_blocks *mem_pool = r->block_pool;
497 uint32_t block_size = rtio_mempool_block_size(r);
498
499 uintptr_t buff = (uintptr_t)mem_pool->buffer;
500 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
501
502 if (addr < buff || addr >= buff + buff_size) {
503 return UINT16_MAX;
504 }
505 return (addr - buff) / block_size;
506}
507#endif
508
514struct rtio_iodev_sqe {
515 struct rtio_sqe sqe;
516 struct mpsc_node q;
517 struct rtio_iodev_sqe *next;
518 struct rtio *r;
519};
520
524struct rtio_iodev_api {
533 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
534};
535
539struct rtio_iodev {
540 /* Function pointer table */
541 const struct rtio_iodev_api *api;
542
543 /* Data associated with this iodev */
544 void *data;
545};
546
548#define RTIO_OP_NOP 0
549
551#define RTIO_OP_RX (RTIO_OP_NOP+1)
552
554#define RTIO_OP_TX (RTIO_OP_RX+1)
555
557#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
558
560#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
561
563#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
564
566#define RTIO_OP_DELAY (RTIO_OP_TXRX+1)
567
569#define RTIO_OP_I2C_RECOVER (RTIO_OP_DELAY+1)
570
572#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
573
575#define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
576
578#define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
579
581#define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
582
584#define RTIO_OP_AWAIT (RTIO_OP_I3C_CCC+1)
585
589static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
590 const struct rtio_iodev *iodev,
591 void *userdata)
592{
593 memset(sqe, 0, sizeof(struct rtio_sqe));
594 sqe->op = RTIO_OP_NOP;
595 sqe->iodev = iodev;
596 sqe->userdata = userdata;
597}
598
602static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
603 const struct rtio_iodev *iodev,
604 int8_t prio,
605 uint8_t *buf,
606 uint32_t len,
607 void *userdata)
608{
609 memset(sqe, 0, sizeof(struct rtio_sqe));
610 sqe->op = RTIO_OP_RX;
611 sqe->prio = prio;
612 sqe->iodev = iodev;
613 sqe->rx.buf_len = len;
614 sqe->rx.buf = buf;
615 sqe->userdata = userdata;
616}
617
623static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
624 const struct rtio_iodev *iodev, int8_t prio,
625 void *userdata)
626{
627 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
628 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
629}
630
631static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
632 const struct rtio_iodev *iodev, int8_t prio,
633 void *userdata)
634{
635 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
636 sqe->flags |= RTIO_SQE_MULTISHOT;
637}
638
642static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
643 const struct rtio_iodev *iodev,
644 int8_t prio,
645 const uint8_t *buf,
646 uint32_t len,
647 void *userdata)
648{
649 memset(sqe, 0, sizeof(struct rtio_sqe));
650 sqe->op = RTIO_OP_TX;
651 sqe->prio = prio;
652 sqe->iodev = iodev;
653 sqe->tx.buf_len = len;
654 sqe->tx.buf = buf;
655 sqe->userdata = userdata;
656}
657
668static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
669 const struct rtio_iodev *iodev,
670 int8_t prio,
671 const uint8_t *tiny_write_data,
672 uint8_t tiny_write_len,
673 void *userdata)
674{
675 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
676
677 memset(sqe, 0, sizeof(struct rtio_sqe));
678 sqe->op = RTIO_OP_TINY_TX;
679 sqe->prio = prio;
680 sqe->iodev = iodev;
681 sqe->tiny_tx.buf_len = tiny_write_len;
682 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
683 sqe->userdata = userdata;
684}
685
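/*
 * Usage sketch (not part of rtio.h): a tiny write copies the bytes into the
 * SQE itself, so the source buffer does not have to outlive the submission.
 * `ctx` and `i2c_dev_iodev` are hypothetical placeholders.
 */
static inline void example_tiny_write(struct rtio *ctx,
                                      const struct rtio_iodev *i2c_dev_iodev)
{
        const uint8_t cmd[2] = {0x20, 0x01}; /* register address and value */
        struct rtio_sqe *sqe = rtio_sqe_acquire(ctx);

        if (sqe != NULL) {
                rtio_sqe_prep_tiny_write(sqe, i2c_dev_iodev, RTIO_PRIO_NORM,
                                         cmd, sizeof(cmd), NULL);
        }
}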
694static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
695 rtio_callback_t callback,
696 void *arg0,
697 void *userdata)
698{
699 memset(sqe, 0, sizeof(struct rtio_sqe));
700 sqe->op = RTIO_OP_CALLBACK;
701 sqe->prio = 0;
702 sqe->iodev = NULL;
703 sqe->callback.callback = callback;
704 sqe->callback.arg0 = arg0;
705 sqe->userdata = userdata;
706}
707
718static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
719 rtio_callback_t callback,
720 void *arg0,
721 void *userdata)
722{
723 rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
724 sqe->flags |= RTIO_SQE_NO_RESPONSE;
725}
726
730static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
731 const struct rtio_iodev *iodev,
732 int8_t prio,
733 const uint8_t *tx_buf,
734 uint8_t *rx_buf,
735 uint32_t buf_len,
736 void *userdata)
737{
738 memset(sqe, 0, sizeof(struct rtio_sqe));
739 sqe->op = RTIO_OP_TXRX;
740 sqe->prio = prio;
741 sqe->iodev = iodev;
742 sqe->txrx.buf_len = buf_len;
743 sqe->txrx.tx_buf = tx_buf;
744 sqe->txrx.rx_buf = rx_buf;
745 sqe->userdata = userdata;
746}
747
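/*
 * Usage sketch (not part of rtio.h): a transceive clocks equal-length TX and
 * RX buffers simultaneously, as on a full-duplex SPI bus. `ctx` and
 * `spi_dev_iodev` are hypothetical placeholders.
 */
static inline void example_transceive(struct rtio *ctx,
                                      const struct rtio_iodev *spi_dev_iodev,
                                      const uint8_t *tx, uint8_t *rx, uint32_t len)
{
        struct rtio_sqe *sqe = rtio_sqe_acquire(ctx);

        if (sqe != NULL) {
                rtio_sqe_prep_transceive(sqe, spi_dev_iodev, RTIO_PRIO_NORM,
                                         tx, rx, len, NULL);
        }
}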
748static inline void rtio_sqe_prep_await(struct rtio_sqe *sqe,
749 const struct rtio_iodev *iodev,
750 int8_t prio,
751 void *userdata)
752{
753 memset(sqe, 0, sizeof(struct rtio_sqe));
754 sqe->op = RTIO_OP_AWAIT;
755 sqe->prio = prio;
756 sqe->iodev = iodev;
757 sqe->userdata = userdata;
758}
759
760static inline void rtio_sqe_prep_delay(struct rtio_sqe *sqe,
761 k_timeout_t timeout,
762 void *userdata)
763{
764 memset(sqe, 0, sizeof(struct rtio_sqe));
765 sqe->op = RTIO_OP_DELAY;
766 sqe->prio = 0;
767 sqe->iodev = NULL;
768 sqe->delay.timeout = timeout;
769 sqe->userdata = userdata;
770}
771
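/*
 * Usage sketch (not part of rtio.h): a DELAY op chained ahead of a read, e.g.
 * to respect a sensor's wake-up time without blocking a thread. `ctx` and
 * `dev_iodev` are hypothetical placeholders.
 */
static inline void example_delayed_read(struct rtio *ctx,
                                        const struct rtio_iodev *dev_iodev,
                                        uint8_t *buf, uint32_t len)
{
        struct rtio_sqe *wait = rtio_sqe_acquire(ctx);
        struct rtio_sqe *read = rtio_sqe_acquire(ctx);

        if (wait == NULL || read == NULL) {
                rtio_sqe_drop_all(ctx);
                return;
        }

        rtio_sqe_prep_delay(wait, K_MSEC(10), NULL);
        wait->flags |= RTIO_SQE_CHAINED;
        rtio_sqe_prep_read(read, dev_iodev, RTIO_PRIO_NORM, buf, len, NULL);
}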
772static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
773{
774 struct mpsc_node *node = mpsc_pop(&pool->free_q);
775
776 if (node == NULL) {
777 return NULL;
778 }
779
780 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
781
782 pool->pool_free--;
783
784 return iodev_sqe;
785}
786
787static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
788{
789 mpsc_push(&pool->free_q, &iodev_sqe->q);
790
791 pool->pool_free++;
792}
793
794static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
795{
796 struct mpsc_node *node = mpsc_pop(&pool->free_q);
797
798 if (node == NULL) {
799 return NULL;
800 }
801
802 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
803
804 memset(cqe, 0, sizeof(struct rtio_cqe));
805
806 pool->pool_free--;
807
808 return cqe;
809}
810
811static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
812{
813 mpsc_push(&pool->free_q, &cqe->q);
814
815 pool->pool_free++;
816}
817
818static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
819 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
820{
821#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
822 ARG_UNUSED(r);
823 ARG_UNUSED(min_sz);
824 ARG_UNUSED(max_sz);
825 ARG_UNUSED(buf);
826 ARG_UNUSED(buf_len);
827 return -ENOTSUP;
828#else
829 const uint32_t block_size = rtio_mempool_block_size(r);
830 uint32_t bytes = max_sz;
831
832 /* Not every context has a block pool and the block size may return 0 in
833 * that case
834 */
835 if (block_size == 0) {
836 return -ENOMEM;
837 }
838
839 do {
840 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
841 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
842
843 if (rc == 0) {
844 *buf_len = num_blks * block_size;
845 return 0;
846 }
847
848 if (bytes <= block_size) {
849 break;
850 }
851
852 bytes -= block_size;
853 } while (bytes >= min_sz);
854
855 return -ENOMEM;
856#endif
857}
858
859static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
860{
861#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
862 ARG_UNUSED(r);
863 ARG_UNUSED(buf);
864 ARG_UNUSED(buf_len);
865#else
866 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
867
868 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
869#endif
870}
871
872/* Do not try and reformat the macros */
873/* clang-format off */
874
882#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
883 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
884 .api = (iodev_api), \
885 .data = (iodev_data), \
886 }
887
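/*
 * Usage sketch (not part of rtio.h): a minimal I/O device. This submit
 * handler completes every request immediately; a real driver would start the
 * transfer and call rtio_iodev_sqe_ok()/rtio_iodev_sqe_err() from its
 * completion path. All names are hypothetical.
 */
static void example_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
        rtio_iodev_sqe_ok(iodev_sqe, 0);
}

static const struct rtio_iodev_api example_iodev_api = {
        .submit = example_iodev_submit,
};

RTIO_IODEV_DEFINE(example_iodev, &example_iodev_api, NULL);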
888#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
889 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
890 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
891 .free_q = MPSC_INIT((name.free_q)), \
892 .pool_size = sz, \
893 .pool_free = sz, \
894 .pool = CONCAT(_sqe_pool_, name), \
895 }
896
897
898#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
899 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
900 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
901 .free_q = MPSC_INIT((name.free_q)), \
902 .pool_size = sz, \
903 .pool_free = sz, \
904 .pool = CONCAT(_cqe_pool_, name), \
905 }
906
916#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
917
927#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
928
929#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
930 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
931 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
932 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
933 CONCAT(_block_pool_, name), RTIO_DMEM)
934
935#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
936 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
937 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
938 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
939 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
940 STRUCT_SECTION_ITERABLE(rtio, name) = { \
941 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
942 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
943 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
944 .cq_count = ATOMIC_INIT(0), \
945 .xcqcnt = ATOMIC_INIT(0), \
946 .sqe_pool = _sqe_pool, \
947 .cqe_pool = _cqe_pool, \
948 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
949 .sq = MPSC_INIT((name.sq)), \
950 .cq = MPSC_INIT((name.cq)), \
951 }
952
960#define RTIO_DEFINE(name, sq_sz, cq_sz) \
961 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
962 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
963 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
964 &CONCAT(name, _cqe_pool), NULL)
965
966/* clang-format on */
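/*
 * Usage sketch (not part of rtio.h): statically define an RTIO context with
 * room for 8 in-flight submissions and 8 pending completions. The name
 * `example_rtio_ctx` is a placeholder.
 */
RTIO_DEFINE(example_rtio_ctx, 8, 8);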
967
978#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
979 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
980 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
981 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
982 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
983
984/* clang-format on */
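/*
 * Usage sketch (not part of rtio.h): a context with an attached memory pool
 * of 16 blocks of 32 bytes, 4-byte aligned. Reads prepared with
 * rtio_sqe_prep_read_with_pool() get their buffer from this pool when the
 * operation runs. The name `example_pool_ctx` is a placeholder.
 */
RTIO_DEFINE_WITH_MEMPOOL(example_pool_ctx, 8, 8, 16, 32, 4);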
985
993static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
994{
995 return r->sqe_pool->pool_free;
996}
997
1006static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
1007{
1008 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
1009 return iodev_sqe->next;
1010 } else {
1011 return NULL;
1012 }
1013}
1014
1015
1024static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
1025{
1026 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
1027 return iodev_sqe->next;
1028 } else {
1029 return NULL;
1030 }
1031}
1032
1041static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
1042{
1043 return iodev_sqe->next;
1044}
1045
1054static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
1055{
1056 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1057
1058 if (iodev_sqe == NULL) {
1059 return NULL;
1060 }
1061
1062 mpsc_push(&r->sq, &iodev_sqe->q);
1063
1064 return &iodev_sqe->sqe;
1065}
1066
1072static inline void rtio_sqe_drop_all(struct rtio *r)
1073{
1074 struct rtio_iodev_sqe *iodev_sqe;
1075 struct mpsc_node *node = mpsc_pop(&r->sq);
1076
1077 while (node != NULL) {
1078 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1079 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1080 node = mpsc_pop(&r->sq);
1081 }
1082}
1083
1087static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
1088{
1089 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
1090
1091 if (cqe == NULL) {
1092 return NULL;
1093 }
1094
1095 memset(cqe, 0, sizeof(struct rtio_cqe));
1096
1097 return cqe;
1098}
1099
1103static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
1104{
1105 mpsc_push(&r->cq, &cqe->q);
1106}
1107
1119static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
1120{
1121 struct mpsc_node *node;
1122 struct rtio_cqe *cqe = NULL;
1123
1124#ifdef CONFIG_RTIO_CONSUME_SEM
1125 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1126 return NULL;
1127 }
1128#endif
1129
1130 node = mpsc_pop(&r->cq);
1131 if (node == NULL) {
1132 return NULL;
1133 }
1134 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1135
1136 return cqe;
1137}
1138
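/*
 * Usage sketch (not part of rtio.h): drain whatever completions are currently
 * available without blocking, releasing each CQE once inspected. `ctx` is a
 * hypothetical placeholder.
 */
static inline void example_drain_completions(struct rtio *ctx)
{
        struct rtio_cqe *cqe;

        while ((cqe = rtio_cqe_consume(ctx)) != NULL) {
                if (cqe->result < 0) {
                        /* the operation failed; cqe->userdata identifies it */
                }
                rtio_cqe_release(ctx, cqe);
        }
}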
1149static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1150{
1151 struct mpsc_node *node;
1152 struct rtio_cqe *cqe;
1153
1154#ifdef CONFIG_RTIO_CONSUME_SEM
1155 k_sem_take(r->consume_sem, K_FOREVER);
1156#endif
1157 node = mpsc_pop(&r->cq);
1158 while (node == NULL) {
1159 Z_SPIN_DELAY(1);
1160 node = mpsc_pop(&r->cq);
1161 }
1162 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1163
1164 return cqe;
1165}
1166
1173static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1174{
1175 rtio_cqe_pool_free(r->cqe_pool, cqe);
1176}
1177
1184static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1185{
1186 uint32_t flags = 0;
1187
1188#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1189 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1190 struct rtio *r = iodev_sqe->r;
1191 struct sys_mem_blocks *mem_pool = r->block_pool;
1192 unsigned int blk_index = 0;
1193 unsigned int blk_count = 0;
1194
1195 if (iodev_sqe->sqe.rx.buf) {
1196 blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1197 mem_pool->info.blk_sz_shift;
1198 blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1199 }
1200 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1201 }
1202#else
1203 ARG_UNUSED(iodev_sqe);
1204#endif
1205
1206 return flags;
1207}
1208
1224__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1225 uint8_t **buff, uint32_t *buff_len);
1226
1227static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1228 uint8_t **buff, uint32_t *buff_len)
1229{
1230#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1231 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1232 unsigned int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1233 unsigned int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1234 uint32_t blk_size = rtio_mempool_block_size(r);
1235
1236 *buff_len = blk_count * blk_size;
1237
1238 if (blk_count > 0) {
1239 *buff = r->block_pool->buffer + blk_idx * blk_size;
1240
1241 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1242 __ASSERT_NO_MSG(*buff <
1243 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1244 } else {
1245 *buff = NULL;
1246 }
1247 return 0;
1248 }
1249 return -EINVAL;
1250#else
1251 ARG_UNUSED(r);
1252 ARG_UNUSED(cqe);
1253 ARG_UNUSED(buff);
1254 ARG_UNUSED(buff_len);
1255
1256 return -ENOTSUP;
1257#endif
1258}
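/*
 * Usage sketch (not part of rtio.h): for reads submitted with
 * RTIO_SQE_MEMPOOL_BUFFER, the completion's flags identify the allocated
 * block; the buffer must be handed back with rtio_release_buffer() once it
 * has been processed. `ctx` is a hypothetical placeholder.
 */
static inline void example_consume_mempool_read(struct rtio *ctx)
{
        struct rtio_cqe *cqe = rtio_cqe_consume_block(ctx);
        uint8_t *buf;
        uint32_t buf_len;

        if (rtio_cqe_get_mempool_buffer(ctx, cqe, &buf, &buf_len) == 0) {
                /* ... process buf[0..buf_len) ... */
                rtio_release_buffer(ctx, buf, buf_len);
        }

        rtio_cqe_release(ctx, cqe);
}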
1259
1260void rtio_executor_submit(struct rtio *r);
1261void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1262void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1263
1272static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1273{
1274 rtio_executor_ok(iodev_sqe, result);
1275}
1276
1285static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1286{
1287 rtio_executor_err(iodev_sqe, result);
1288}
1289
1301static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1302{
1303 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1304
1305 if (cqe == NULL) {
1306 atomic_inc(&r->xcqcnt);
1307 } else {
1308 cqe->result = result;
1309 cqe->userdata = userdata;
1310 cqe->flags = flags;
1311 rtio_cqe_produce(r, cqe);
1312 }
1313
1314 /* atomic_t isn't guaranteed to wrap correctly as it could be signed, so
1315 * we must resort to a cas loop.
1316 */
1317 atomic_t val, new_val;
1318
1319 do {
1320 val = atomic_get(&r->cq_count);
1321 new_val = (atomic_t)((uintptr_t)val + 1);
1322 } while (!atomic_cas(&r->cq_count, val, new_val));
1323
1324#ifdef CONFIG_RTIO_SUBMIT_SEM
1325 if (r->submit_count > 0) {
1326 r->submit_count--;
1327 if (r->submit_count == 0) {
1328 k_sem_give(r->submit_sem);
1329 }
1330 }
1331#endif
1332#ifdef CONFIG_RTIO_CONSUME_SEM
1333 k_sem_give(r->consume_sem);
1334#endif
1335}
1336
1337#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1338
1351static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1352 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1353{
1354 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1355
1356#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1357 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1358 struct rtio *r = iodev_sqe->r;
1359
1360 if (sqe->rx.buf != NULL) {
1361 if (sqe->rx.buf_len < min_buf_len) {
1362 return -ENOMEM;
1363 }
1364 *buf = sqe->rx.buf;
1365 *buf_len = sqe->rx.buf_len;
1366 return 0;
1367 }
1368
1369 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1370 if (rc == 0) {
1371 sqe->rx.buf = *buf;
1372 sqe->rx.buf_len = *buf_len;
1373 return 0;
1374 }
1375
1376 return -ENOMEM;
1377 }
1378#else
1379 ARG_UNUSED(max_buf_len);
1380#endif
1381
1382 if (sqe->rx.buf_len < min_buf_len) {
1383 return -ENOMEM;
1384 }
1385
1386 *buf = sqe->rx.buf;
1387 *buf_len = sqe->rx.buf_len;
1388 return 0;
1389}
1390
1405__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1406
1407static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1408{
1409#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1410 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1411 return;
1412 }
1413
1414 rtio_block_pool_free(r, buff, buff_len);
1415#else
1416 ARG_UNUSED(r);
1417 ARG_UNUSED(buff);
1418 ARG_UNUSED(buff_len);
1419#endif
1420}
1421
1425static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1426{
1427 k_object_access_grant(r, t);
1428
1429#ifdef CONFIG_RTIO_SUBMIT_SEM
1430 k_object_access_grant(r->submit_sem, t);
1431#endif
1432
1433#ifdef CONFIG_RTIO_CONSUME_SEM
1434 k_object_access_grant(r->consume_sem, t);
1435#endif
1436}
1437
1448__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1449
1450static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1451{
1452 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1453
1454 do {
1455 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1456 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1457 } while (iodev_sqe != NULL);
1458
1459 return 0;
1460}
1461
1473__syscall void rtio_sqe_signal(struct rtio_sqe *sqe);
1474
1475static inline void z_impl_rtio_sqe_signal(struct rtio_sqe *sqe)
1476{
1477 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1478
1479 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1480 iodev_sqe->sqe.await.callback(iodev_sqe, iodev_sqe->sqe.await.userdata);
1481 }
1482}
1483
1494static inline void rtio_iodev_sqe_await_signal(struct rtio_iodev_sqe *iodev_sqe,
1495 rtio_signaled_t callback,
1496 void *userdata)
1497{
1498 iodev_sqe->sqe.await.callback = callback;
1499 iodev_sqe->sqe.await.userdata = userdata;
1500
1501 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1502 callback(iodev_sqe, userdata);
1503 }
1504}
1505
1521__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1522 struct rtio_sqe **handle, size_t sqe_count);
1523
1524static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1525 struct rtio_sqe **handle,
1526 size_t sqe_count)
1527{
1528 struct rtio_sqe *sqe;
1529 uint32_t acquirable = rtio_sqe_acquirable(r);
1530
1531 if (acquirable < sqe_count) {
1532 return -ENOMEM;
1533 }
1534
1535 for (unsigned long i = 0; i < sqe_count; i++) {
1536 sqe = rtio_sqe_acquire(r);
1537 __ASSERT_NO_MSG(sqe != NULL);
1538 if (handle != NULL && i == 0) {
1539 *handle = sqe;
1540 }
1541 *sqe = sqes[i];
1542 }
1543
1544 return 0;
1545}
1546
1563static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1564{
1565 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1566}
1567
1583__syscall int rtio_cqe_copy_out(struct rtio *r,
1584 struct rtio_cqe *cqes,
1585 size_t cqe_count,
1586 k_timeout_t timeout);
1587static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1588 struct rtio_cqe *cqes,
1589 size_t cqe_count,
1590 k_timeout_t timeout)
1591{
1592 size_t copied = 0;
1593 struct rtio_cqe *cqe;
1594 k_timepoint_t end = sys_timepoint_calc(timeout);
1595
1596 do {
1597 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1598 : rtio_cqe_consume(r);
1599 if (cqe == NULL) {
1600 Z_SPIN_DELAY(25);
1601 continue;
1602 }
1603 cqes[copied++] = *cqe;
1604 rtio_cqe_release(r, cqe);
1605 } while (copied < cqe_count && !sys_timepoint_expired(end));
1606
1607 return copied;
1608}
1609
1625__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1626
1627#ifdef CONFIG_RTIO_SUBMIT_SEM
1628static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1629{
1630 int res = 0;
1631
1632 if (wait_count > 0) {
1633 __ASSERT(!k_is_in_isr(),
1634 "expected rtio submit with wait count to be called from a thread");
1635
1636 k_sem_reset(r->submit_sem);
1637 r->submit_count = wait_count;
1638 }
1639
1640 rtio_executor_submit(r);
1641
1642 if (wait_count > 0) {
1643 res = k_sem_take(r->submit_sem, K_FOREVER);
1644 __ASSERT(res == 0,
1645 "semaphore was reset or timed out while waiting on completions!");
1646 }
1647
1648 return res;
1649}
1650#else
1651static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1652{
1653
1654 int res = 0;
1655 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count);
1656 uintptr_t cq_complete_count = cq_count + wait_count;
1657 bool wraps = cq_complete_count < cq_count;
1658
1658
1659 rtio_executor_submit(r);
1660
1661 if (wraps) {
1662 while ((uintptr_t)atomic_get(&r->cq_count) >= cq_count) {
1663 Z_SPIN_DELAY(10);
1664 k_yield();
1665 }
1666 }
1667
1668 while ((uintptr_t)atomic_get(&r->cq_count) < cq_complete_count) {
1669 Z_SPIN_DELAY(10);
1670 k_yield();
1671 }
1672
1673 return res;
1674}
1675#endif /* CONFIG_RTIO_SUBMIT_SEM */
1676
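/*
 * Usage sketch (not part of rtio.h): a complete blocking round trip through
 * the API. The context would be created elsewhere with RTIO_DEFINE(); `ctx`
 * and `dev_iodev` are hypothetical placeholders.
 */
static inline int example_blocking_read(struct rtio *ctx,
                                        const struct rtio_iodev *dev_iodev,
                                        uint8_t *buf, uint32_t len)
{
        struct rtio_sqe *sqe = rtio_sqe_acquire(ctx);
        struct rtio_cqe *cqe;
        int res;

        if (sqe == NULL) {
                return -ENOMEM;
        }

        rtio_sqe_prep_read(sqe, dev_iodev, RTIO_PRIO_NORM, buf, len, NULL);

        rtio_submit(ctx, 1); /* submit and wait for one completion */

        cqe = rtio_cqe_consume_block(ctx);
        res = cqe->result;
        rtio_cqe_release(ctx, cqe);

        return res;
}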
1681#ifdef __cplusplus
1682}
1683#endif
1684
1685#include <zephyr/syscalls/rtio.h>
1686
1687#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */