From 4935e504d2d0f030e7eab585e4362d947e7724e4 Mon Sep 17 00:00:00 2001 From: anan_bj Date: Tue, 6 Jun 2023 18:58:20 +0800 Subject: [PATCH] [Add] add rpmsg lib & template --- Middlewares/Rpmsg_Library/Inc/list.h | 169 ++ .../Rpmsg_Library/Inc/remoteproc_rsc_table.h | 77 + Middlewares/Rpmsg_Library/Inc/rpmsg_api.h | 174 ++ Middlewares/Rpmsg_Library/Inc/rpmsg_config.h | 19 + Middlewares/Rpmsg_Library/Src/rpmsg_api.c | 239 +++ Middlewares/Third_Party/rpmsg/common/llist.c | 113 ++ .../environment/bm/rpmsg_env_specific.h | 37 + .../environment/freertos/rpmsg_env_specific.h | 47 + .../environment/qnx/rpmsg_env_specific.h | 44 + .../environment/threadx/rpmsg_env_specific.h | 44 + .../environment/xos/rpmsg_env_specific.h | 43 + .../environment/zephyr/rpmsg_env_specific.h | 35 + Middlewares/Third_Party/rpmsg/include/llist.h | 62 + .../platform/imx6sx_m4/rpmsg_platform.h | 62 + .../platform/imx7d_m4/rpmsg_platform.h | 62 + .../platform/imx7ulp_m4/rpmsg_platform.h | 62 + .../platform/imx8mm_m4/rpmsg_platform.h | 61 + .../platform/imx8mn_m7/rpmsg_platform.h | 61 + .../platform/imx8mp_m7/rpmsg_platform.h | 60 + .../platform/imx8mq_m4/rpmsg_platform.h | 61 + .../platform/imx8qm_m4/rpmsg_platform.h | 94 ++ .../platform/imx8qx_cm4/rpmsg_platform.h | 64 + .../platform/imxrt1160/rpmsg_platform.h | 59 + .../platform/imxrt1170/rpmsg_platform.h | 59 + .../imxrt500_fusionf1/rpmsg_platform.h | 58 + .../platform/imxrt500_m33/rpmsg_platform.h | 58 + .../platform/imxrt600_hifi4/rpmsg_platform.h | 59 + .../platform/imxrt600_m33/rpmsg_platform.h | 58 + .../platform/ingenic_riscv/rpmsg_platform.h | 63 + .../include/platform/k32l3a6/rpmsg_platform.h | 60 + .../platform/lpc5410x/rpmsg_platform.h | 60 + .../platform/lpc5411x/rpmsg_platform.h | 60 + .../platform/lpc55s69/rpmsg_platform.h | 59 + .../rpmsg/include/rpmsg_compiler.h | 115 ++ .../rpmsg/include/rpmsg_default_config.h | 196 +++ .../Third_Party/rpmsg/include/rpmsg_env.h | 627 ++++++++ .../Third_Party/rpmsg/include/rpmsg_lite.h | 375 +++++ 
.../Third_Party/rpmsg/include/rpmsg_ns.h | 140 ++ .../Third_Party/rpmsg/include/rpmsg_queue.h | 203 +++ .../Third_Party/rpmsg/include/virtio_ring.h | 168 ++ .../Third_Party/rpmsg/include/virtqueue.h | 252 +++ .../porting/environment/rpmsg_env_bm.c | 443 +++++ .../porting/environment/rpmsg_env_freertos.c | 789 +++++++++ .../porting/environment/rpmsg_env_qnx.c | 776 +++++++++ .../porting/environment/rpmsg_env_qnx.h | 56 + .../porting/environment/rpmsg_env_threadx.c | 724 +++++++++ .../porting/environment/rpmsg_env_xos.c | 747 +++++++++ .../porting/environment/rpmsg_env_zephyr.c | 715 +++++++++ .../platform/imx6sx_m4/rpmsg_platform.c | 294 ++++ .../imx6sx_m4/rpmsg_platform_zephyr_ipm.c | 273 ++++ .../platform/imx7d_m4/rpmsg_platform.c | 294 ++++ .../imx7d_m4/rpmsg_platform_zephyr_ipm.c | 273 ++++ .../platform/imx7ulp_m4/rpmsg_platform.c | 297 ++++ .../platform/imx8mm_m4/rpmsg_platform.c | 293 ++++ .../platform/imx8mn_m7/rpmsg_platform.c | 293 ++++ .../platform/imx8mp_m7/rpmsg_platform.c | 293 ++++ .../platform/imx8mq_m4/rpmsg_platform.c | 285 ++++ .../platform/imx8qm_m4/rpmsg_platform.c | 492 ++++++ .../platform/imx8qx_cm4/rpmsg_platform.c | 306 ++++ .../platform/imxrt1160/rpmsg_platform.c | 357 +++++ .../platform/imxrt1170/rpmsg_platform.c | 357 +++++ .../imxrt500_fusionf1/rpmsg_platform.c | 281 ++++ .../platform/imxrt500_m33/rpmsg_platform.c | 310 ++++ .../platform/imxrt600_hifi4/rpmsg_platform.c | 281 ++++ .../platform/imxrt600_m33/rpmsg_platform.c | 310 ++++ .../platform/ingenic_riscv/rpmsg_platform.c | 237 +++ .../porting/platform/k32l3a6/rpmsg_platform.c | 357 +++++ .../platform/lpc5410x/rpmsg_platform.c | 353 ++++ .../platform/lpc5411x/rpmsg_platform.c | 353 ++++ .../lpc5411x/rpmsg_platform_zephyr_ipm.c | 261 +++ .../platform/lpc55s69/rpmsg_platform.c | 339 ++++ .../Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c | 1420 +++++++++++++++++ .../Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c | 189 +++ .../rpmsg/rpmsg_lite/rpmsg_queue.c | 222 +++ 
.../Third_Party/rpmsg/virtio/virtqueue.c | 735 +++++++++ cpu/core-riscv/ld.lds | 7 +- .../drivers-x2600/include/x2600_ll_risc_ccu.h | 3 + drivers/drivers-x2600/src/x2600_ll_risc_ccu.c | 16 +- .../template-riscv-freertos/README.md | 4 +- .../.vscode/cmake-kits.json | 18 + .../template-riscv-rpmsg/.vscode/launch.json | 55 + .../.vscode/settings.json | 8 + .../template-riscv-rpmsg/.vscode/tasks.json | 12 + .../template-riscv-rpmsg/CMakeLists.txt | 119 ++ .../Templates/template-riscv-rpmsg/Makefile | 185 +++ .../Templates/template-riscv-rpmsg/README.md | 46 + .../include/board_eth_phy_conf.h | 75 + .../include/x2600_hal_conf.h | 119 ++ .../include/x2600_sysclk_conf.h | 68 + .../Templates/template-riscv-rpmsg/main.c | 28 + .../template-riscv-rpmsg/riscv32-gcc.cmake | 18 + .../template-riscv-rpmsg/xburst2_app/Makefile | 5 + .../template-riscv-rpmsg/xburst2_app/main.c | 40 + .../xburst2_app/rpmsg_api.c | 113 ++ .../xburst2_app/rpmsg_api.h | 24 + .../Templates/template-riscv/README.md | 4 +- 96 files changed, 19385 insertions(+), 6 deletions(-) create mode 100644 Middlewares/Rpmsg_Library/Inc/list.h create mode 100644 Middlewares/Rpmsg_Library/Inc/remoteproc_rsc_table.h create mode 100755 Middlewares/Rpmsg_Library/Inc/rpmsg_api.h create mode 100644 Middlewares/Rpmsg_Library/Inc/rpmsg_config.h create mode 100755 Middlewares/Rpmsg_Library/Src/rpmsg_api.c create mode 100755 Middlewares/Third_Party/rpmsg/common/llist.c create mode 100755 Middlewares/Third_Party/rpmsg/include/environment/bm/rpmsg_env_specific.h create mode 100755 Middlewares/Third_Party/rpmsg/include/environment/freertos/rpmsg_env_specific.h create mode 100755 Middlewares/Third_Party/rpmsg/include/environment/qnx/rpmsg_env_specific.h create mode 100755 Middlewares/Third_Party/rpmsg/include/environment/threadx/rpmsg_env_specific.h create mode 100755 Middlewares/Third_Party/rpmsg/include/environment/xos/rpmsg_env_specific.h create mode 100755 
Middlewares/Third_Party/rpmsg/include/environment/zephyr/rpmsg_env_specific.h create mode 100755 Middlewares/Third_Party/rpmsg/include/llist.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx6sx_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx7d_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx7ulp_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8mm_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8mn_m7/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8mp_m7/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8mq_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8qm_m4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imx8qx_cm4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt1160/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt1170/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt500_fusionf1/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt500_m33/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt600_hifi4/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/imxrt600_m33/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/k32l3a6/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/lpc5410x/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/platform/lpc5411x/rpmsg_platform.h create mode 100755 
Middlewares/Third_Party/rpmsg/include/platform/lpc55s69/rpmsg_platform.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_compiler.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_default_config.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_env.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_lite.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_ns.h create mode 100755 Middlewares/Third_Party/rpmsg/include/rpmsg_queue.h create mode 100755 Middlewares/Third_Party/rpmsg/include/virtio_ring.h create mode 100755 Middlewares/Third_Party/rpmsg/include/virtqueue.h create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_freertos.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.h create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_threadx.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_xos.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_zephyr.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform_zephyr_ipm.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform_zephyr_ipm.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7ulp_m4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mm_m4/rpmsg_platform.c create mode 100755 
Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mn_m7/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mp_m7/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mq_m4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qm_m4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qx_cm4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1160/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1170/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_fusionf1/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_m33/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_hifi4/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_m33/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/k32l3a6/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5410x/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform_zephyr_ipm.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc55s69/rpmsg_platform.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c create mode 100755 Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_queue.c 
create mode 100755 Middlewares/Third_Party/rpmsg/virtio/virtqueue.c create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/cmake-kits.json create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/launch.json create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/settings.json create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/tasks.json create mode 100755 projects/x2660-halley/Templates/template-riscv-rpmsg/CMakeLists.txt create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/Makefile create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/README.md create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/include/board_eth_phy_conf.h create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_hal_conf.h create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_sysclk_conf.h create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/main.c create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/riscv32-gcc.cmake create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/Makefile create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/main.c create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.c create mode 100644 projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.h diff --git a/Middlewares/Rpmsg_Library/Inc/list.h b/Middlewares/Rpmsg_Library/Inc/list.h new file mode 100644 index 00000000..1c526a86 --- /dev/null +++ b/Middlewares/Rpmsg_Library/Inc/list.h @@ -0,0 +1,169 @@ +#ifndef LIST_H +#define LIST_H + +//#ifdef __KERNEL__ +//#include + +//#else + +/* + * Copied from include/linux/... 
+ */ + +#undef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + + +struct list_head { + struct list_head *next, *prev; +}; + + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_first_entry_or_null - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note that if the list is empty, it returns NULL. + */ +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) + + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. 
+ * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *_new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = _new; + _new->next = next; + _new->prev = prev; + prev->next = _new; +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *_new, struct list_head *head) +{ + __list_add(_new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! 
+ */ +static inline void __list_del(struct list_head *prev, struct list_head *next) +{ + next->prev = prev; + prev->next = next; +} + +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. + */ +static inline void list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->next = (struct list_head*)LIST_POISON1; + entry->prev = (struct list_head*)LIST_POISON2; +} + +//#endif + +#endif diff --git a/Middlewares/Rpmsg_Library/Inc/remoteproc_rsc_table.h b/Middlewares/Rpmsg_Library/Inc/remoteproc_rsc_table.h new file mode 100644 index 00000000..d6bb0c28 --- /dev/null +++ b/Middlewares/Rpmsg_Library/Inc/remoteproc_rsc_table.h @@ -0,0 +1,77 @@ +#ifndef __REMOTEPROC_RSC_TABLE_H__ +#define __REMOTEPROC_RSC_TABLE_H__ + +#include "rpmsg_api.h" + +#define VIRTIO_ID_RPMSG 7 +#define VRING_COUNT 2 + +#pragma pack(1) +struct fw_rsc_vdev_vring { + uint32_t da; + uint32_t align; + uint32_t num; + uint32_t notifyid; + uint32_t pa; +}; + + +struct fw_rsc_vdev { + uint32_t type; + uint32_t id; + uint32_t notifyid; + uint32_t dfeatures; + uint32_t gfeatures; + uint32_t config_len; + uint8_t status; + uint8_t num_of_vrings; + uint8_t reserved[2]; + struct fw_rsc_vdev_vring vring[0]; +}; + + +struct sh_resource_table { + uint32_t ver; + uint32_t num; + uint32_t reserved[2]; + uint32_t offset[4]; + + struct fw_rsc_vdev vdev; + struct fw_rsc_vdev_vring vring0; + struct fw_rsc_vdev_vring vring1; +}; + +enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_RPROC_MEM = 4, + RSC_FW_CHKSUM = 5, + RSC_LAST = 6, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, +}; + +volatile struct sh_resource_table __attribute__((used,section(".resource_table"))) resource_table = { +/* volatile struct 
sh_resource_table __attribute__((used)) resource_table = { */ + .ver = 1, + .num = 1, + .offset = { + 32, + }, + .reserved = {0,0}, + .vdev= { + RSC_VDEV, VIRTIO_ID_RPMSG, 0,1, 0, 0, 0, + VRING_COUNT, {0, 0}, + }, + .vring0 = {VRING_RX_ADDRESS, VRING_ALIGNMENT, VRING_NUM_BUFFS, VRING0_ID, VRING_RX_ADDRESS}, + .vring1 = {VRING_TX_ADDRESS, VRING_ALIGNMENT, VRING_NUM_BUFFS, VRING1_ID, VRING_TX_ADDRESS}, +}; + +#pragma pack() + + + + +#endif /*__REMOTEPROC_RSC_TABLE_H__*/ diff --git a/Middlewares/Rpmsg_Library/Inc/rpmsg_api.h b/Middlewares/Rpmsg_Library/Inc/rpmsg_api.h new file mode 100755 index 00000000..a69047bc --- /dev/null +++ b/Middlewares/Rpmsg_Library/Inc/rpmsg_api.h @@ -0,0 +1,174 @@ +/** + * @file x2600_hal_template.h + * @author MPU系统软件部团队 + * @brief rpmsg核间通信头文件 + * + * @copyright 版权所有 (北京君正集成电路股份有限公司) {2022} + * @copyright Copyright© 2022 Ingenic Semiconductor Co.,Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __RPMSG_LIBRARY_H__ +#define __RPMSG_LIBRARY_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/** + * @addtogroup group_RPMSG_LIBRARY + * @{ + */ + +/* 1. 头文件 (Includes)----------------------------------------------- */ +#include "rpmsg_config.h" +#include "rpmsg_lite.h" +#include "rpmsg_ns.h" +#include "list.h" + +/* 2. 导出的类型 (Exported Types)--------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_exported_types RPMSG_LIBRARY 导出的类型 (Exported Types) + * @{ + */ +typedef struct { + struct rpmsg_lite_instance rpmsg_lite_dev_ctx; + struct rpmsg_lite_instance *rpmsg_instance; + struct rpmsg_lite_ept_static_context rpmsg_lite_ept_ctx; + struct rpmsg_lite_endpoint *rpmsg_ept; +}ingenic_rpmsg_t; + + +/** + * @} + */ +/* 3. 
导出常量定义 Exported Constants ----------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_exported_constants RPMSG_LIBRARY 导出的常量 Exported Constants + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 4. 导出宏定义 Exported Macros --------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_exported_macros RPMSG_LIBRARY 导出宏 Exported Macros + * @{ + */ +#define MAX_MSG_NUM 20 + +#define VRING_RX_ADDRESS 0x12400000 +#define VRING_TX_ADDRESS 0x12401000 + +#define VRING_ALIGNMENT 16 +#define VRING_NUM_BUFFS RL_BUFFER_COUNT +#define VRING0_ID 0 +#define VRING1_ID 1 + +/** + * @} + */ +/* 5. 导出函数申明 Exported Funcs --------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_var RPMSG_LIBRARY 私有变量申明 (Private Variables) + * @defgroup RPMSG_LIBRARY_exported_funcs RPMSG_LIBRARY 导出函数申明 Exported Funcs + * @{ + */ +void create_channel(ingenic_rpmsg_t *ingenic_rpmsg); +int32_t msg_recv(void **buffer, uint32_t *len, uint32_t *src); +int32_t msg_send(ingenic_rpmsg_t *ingenic_rpmsg, uint32_t dst, char *data, uint32_t size, uint32_t timeout); + +/** + * @} + */ +/* 6. 导出变量申明 (Exported Variables) --------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_exported_var RPMSG_LIBRARY 导出变量申明 (Exported Variables) + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 7. 私有类型定义 (Private Types) -------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_types RPMSG_LIBRARY 私有类型定义 (Private Types) + * @{ + */ +struct msg{ + void *buf; + uint32_t size; + uint32_t src; + struct list_head entry; +}; + +/** + * @} + */ +/* 8. 私有常量定义Private Constants ------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_constants RPMSG_LIBRARY 私有常量定义Private Constants + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 9. 
私有宏定义 (Private Macros) -------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_macros RPMSG_LIBRARY 私有宏定义 (Private Macros) + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 10. 私有函数申明 (Private Funcs) ------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_funcs RPMSG_LIBRARY 私有函数申明 (Private Funcs) + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 11. 私有变量申明 Private Variables ----------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_var RPMSG_LIBRARY 私有变量申明 (Private Variables) + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ + +/** + * @} + */ +#ifdef __cplusplus +} +#endif +#endif /* __RPMSG_LIBRARY_H__ */ diff --git a/Middlewares/Rpmsg_Library/Inc/rpmsg_config.h b/Middlewares/Rpmsg_Library/Inc/rpmsg_config.h new file mode 100644 index 00000000..61b313ff --- /dev/null +++ b/Middlewares/Rpmsg_Library/Inc/rpmsg_config.h @@ -0,0 +1,19 @@ +#ifndef __RPMSG_CONFIG_H__ +#define __RPMSG_CONFIG_H__ + +#define _GNUC__ +#define RL_USE_STATIC_API 1 + +#define RL_USE_ENVIRONMENT_CONTEXT 0 + + +#define RL_ALLOW_CUSTOM_SHMEM_CONFIG 0 + +#define RL_BUFFER_COUNT 32 +#define VRING_ALIGN 16 + +#endif // __RPMSG_CONFIG_H__ + + + + diff --git a/Middlewares/Rpmsg_Library/Src/rpmsg_api.c b/Middlewares/Rpmsg_Library/Src/rpmsg_api.c new file mode 100755 index 00000000..5fac0918 --- /dev/null +++ b/Middlewares/Rpmsg_Library/Src/rpmsg_api.c @@ -0,0 +1,239 @@ +/** + * @file rpmsg_api.c + * @author MPU系统软件部团队 + * @brief rpmsg核间通信源文件 + * + * @copyright 版权所有 (北京君正集成电路股份有限公司) {2022} + * @copyright Copyright© 2022 Ingenic Semiconductor Co.,Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ @verbatim + ============================================================================== + ##### 使用说明 ##### + ============================================================================== + @endverbatim + */ + +/* 1.头文件 (Includes)------------------------------------------------ */ +#include +#include +#include + +/** @addtogroup RPMSG_LIBRARY + * @{ + */ + +/* 2.私有常量定义Private Constants -------------------------------------- */ +/** + * @addtogroup RPMSG_LIBRARY_private_constants + * @{ + */ +struct msg msg[MAX_MSG_NUM]; +LIST_HEAD(avali); +LIST_HEAD(used); +DEFINE_SPINLOCK(lock); + +/** + * @} + */ +/* 3. 私有类型定义 (Private Types) -------------------------------------- */ +/** + * @addtogroup RPMSG_LIBRARY_private_types + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 4. 私有宏定义 (Private Macros) -------------------------------------- */ +/** + * @addtogroup RPMSG_LIBRARY_private_macros + * @{ + */ + +#define CLIENT_NAME "rpmsg_chrdev" + +/** + * @} + */ +/* 5. 私有变量申明 Private Variables ------------------------------------ */ +/** + * @addtogroup RPMSG_LIBRARY_private_var + * @{ + */ + +// 删除此行, 添加内容 +// 删除此行, 添加内容 + +/** + * @} + */ +/* 6. 私有函数申明 (Private Funcs) -------------------------------------- */ +/** + * @addtogroup RPMSG_LIBRARY_private_funcs + * @{ + */ + +static int32_t msg_queue_init(); +static int32_t msg_available(); +static int32_t msg_enqueue(void *payload, uint32_t payload_len, uint32_t src); +static int32_t msg_dequeue(void **payload, uint32_t *payload_len, uint32_t *src); +static int32_t riscv_ept_rx_cb_t(void *payload, uint32_t payload_len, uint32_t src, void *priv); +static void rpmsg_mailbox_handler(int irq, void *data); + +/** + * @} + */ +/* 7. 
私有函数实现 (Private Funcs) -------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_private_funcs_impl RPMSG_LIBRARY 私有函数实现 + * @{ + */ +static int32_t msg_queue_init() +{ + uint32_t i; + for(i = 0; i < MAX_MSG_NUM; i++) + list_add_tail(&msg[i].entry, &avali); + return 0; +} +static int32_t msg_available() +{ + int32_t ret; + ret = list_empty(&used); + return !ret; +} + +static int32_t msg_enqueue(void *payload, uint32_t payload_len, uint32_t src) +{ + struct msg *msg; + + spin_lock_irq(&lock); + msg = list_first_entry_or_null(&avali, struct msg, entry); if (!msg) { spin_unlock_irq(&lock); return -1; } /* NOTE(review): free queue exhausted (MAX_MSG_NUM < RL_BUFFER_COUNT) -- drop the message rather than corrupt the list */ + + msg->buf = payload; + msg->size = payload_len; + msg->src = src; + + list_del(&msg->entry); + list_add_tail(&msg->entry, &used); + spin_unlock_irq(&lock); + + return 0; +} + +static int32_t msg_dequeue(void **payload, uint32_t *payload_len, uint32_t *src) +{ + struct msg *msg; + + spin_lock(&lock); + msg = list_first_entry(&used, struct msg, entry); + + *payload = msg->buf; + *payload_len = msg->size; + *src = msg->src; + + list_del(&msg->entry); + list_add_tail(&msg->entry, &avali); + spin_unlock(&lock); + + return 0; +} + + +static int32_t riscv_ept_rx_cb_t(void *payload, uint32_t payload_len, uint32_t src, void *priv) +{ + ingenic_rpmsg_t *ingenic_rpmsg = priv; + char* data = (char*)payload; + if(payload_len && data[0] != '0') /* NOTE(review): filters ASCII character '0' -- confirm '\0' was not intended */ + + msg_enqueue(payload, payload_len, src); + + return 0; +} + +static void rpmsg_mailbox_handler(int irq, void *data) { + RISCV_CCU_TypeDef *RISC_CCU = (RISCV_CCU_TypeDef *)data; + uint32_t msg = 0; + msg = LL_RISC_CCU_Mbox_Recvmsg(RISC_CCU); + rpmsg_handler(msg); +} + +/** + * @} + */ +/* 8. 
导出函数实现------------------------------------------------------- */ +/** + * @defgroup RPMSG_LIBRARY_exported_funcs_impl RPMSG_LIBRARY 导出函数实现 + * @{ + */ +void create_channel(ingenic_rpmsg_t *ingenic_rpmsg) +{ + unsigned int timeout = 0xffff; + int ret = 0; + + ingenic_rpmsg->rpmsg_instance = rpmsg_lite_remote_init((void*)VRING_RX_ADDRESS, + 0,0,&ingenic_rpmsg->rpmsg_lite_dev_ctx); + if(!ingenic_rpmsg->rpmsg_instance){ + prom_printk("rpmsg_lite_remote_init failed \r\n"); + } + + ll_request_irq(IRQ_RISCV_MAILBOX, rpmsg_mailbox_handler, RISC_CCU_Instance); + + + ingenic_rpmsg->rpmsg_ept = rpmsg_lite_create_ept(ingenic_rpmsg->rpmsg_instance,RL_ADDR_ANY, + riscv_ept_rx_cb_t,ingenic_rpmsg,&ingenic_rpmsg->rpmsg_lite_ept_ctx); + if(!ingenic_rpmsg->rpmsg_ept){ + prom_printk("rpmsg_lite_create_ept failed \r\n"); + } + + do { + ret = rpmsg_ns_announce(ingenic_rpmsg->rpmsg_instance, + ingenic_rpmsg->rpmsg_ept, + CLIENT_NAME, + 0); + timeout--; + } + while((ret == RL_NOT_READY) && timeout); + + msg_queue_init(); + + if (ret != RL_SUCCESS) { prom_printk("rpmsg_ns_announce failed \r\n"); } /* NOTE(review): create_channel() is void; it previously ended with 'return ret;' (constraint violation) and silently dropped the announce status */ +} + +int32_t msg_recv(void **buffer, uint32_t *len, uint32_t *src) +{ + while(1) + { + /* NOTE(review): do not take the lock here -- msg_dequeue() acquires the same spinlock internally; a second spin_lock(&lock) would deadlock a non-recursive lock */ + if(!msg_available()) + { + /* queue empty: busy-wait until the rx callback enqueues a message */ + continue; + } + + msg_dequeue(buffer, len, src); + /* the lock was taken and released inside msg_dequeue() */ + break; + } + return 0; +} + +int32_t msg_send(ingenic_rpmsg_t *ingenic_rpmsg, uint32_t dst, char *data, uint32_t size, uint32_t timeout) +{ + return rpmsg_lite_send(ingenic_rpmsg->rpmsg_instance,ingenic_rpmsg->rpmsg_ept, dst, data, size, timeout); +} + + +/** + * @} + */ + +/** + * @} + */ diff --git a/Middlewares/Third_Party/rpmsg/common/llist.c b/Middlewares/Third_Party/rpmsg/common/llist.c new file mode 100755 index 00000000..6d79a920 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/common/llist.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright 2019 NXP + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of Mentor Graphics Corporation nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * llist.c + * + * COMPONENT + * + * OpenAMP stack. + * + * DESCRIPTION + * + * Source file for basic linked list service. + * + **************************************************************************/ +#include "llist.h" + +#define LIST_NULL ((void *)0) +/*! + * add_to_list + * + * Places new element at the start of the list. 
+ * + * @param head - list head + * @param node - new element to add + * + */ +void add_to_list(struct llist **head, struct llist *node) +{ + if (node == LIST_NULL) + { + return; + } + + if (*head != LIST_NULL) + { + /* Place the new element at the start of list. */ + node->next = *head; + node->prev = LIST_NULL; + (*head)->prev = node; + *head = node; + } + else + { + /* List is empty - assign new element to list head. */ + *head = node; + (*head)->next = LIST_NULL; + (*head)->prev = LIST_NULL; + } +} + +/*! + * remove_from_list + * + * Removes the given element from the list. + * + * @param head - list head + * @param element - element to remove from list + * + */ +void remove_from_list(struct llist **head, struct llist *node) +{ + if ((*head == LIST_NULL) || (node == LIST_NULL)) + { + return; + } + + if (node == *head) + { + /* First element has to be removed. */ + *head = (*head)->next; + } + else if (node->next == LIST_NULL) + { + /* Last element has to be removed. */ + node->prev->next = node->next; + } + else + { + /* Intermediate element has to be removed. */ + node->prev->next = node->next; + node->next->prev = node->prev; + } +} diff --git a/Middlewares/Third_Party/rpmsg/include/environment/bm/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/bm/rpmsg_env_specific.h new file mode 100755 index 00000000..f468403e --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/bm/rpmsg_env_specific.h @@ -0,0 +1,37 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains baremetal specific constructions. 
+ * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include +#include "rpmsg_default_config.h" + +typedef struct +{ + uint32_t src; + void *data; + uint32_t len; +} rpmsg_queue_rx_cb_data_t; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +typedef uint8_t LOCK_STATIC_CONTEXT; +typedef uint8_t rpmsg_static_queue_ctxt; +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/environment/freertos/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/freertos/rpmsg_env_specific.h new file mode 100755 index 00000000..456b1ff4 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/freertos/rpmsg_env_specific.h @@ -0,0 +1,47 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains FreeRTOS specific constructions. + * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include +#include "rpmsg_default_config.h" + +typedef struct +{ + uint32_t src; + void *data; + uint32_t len; +} rpmsg_queue_rx_cb_data_t; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +#include "FreeRTOS.h" + +#if (configSUPPORT_STATIC_ALLOCATION == 0) +#warning You have configured RPMsg_Lite to use static API but FreeRTOS is not configured for static allocations! Please switch the configSUPPORT_STATIC_ALLOCATION to 1 in your FreeRTOSConfig.h file. 
+#endif +typedef StaticSemaphore_t LOCK_STATIC_CONTEXT; +typedef StaticQueue_t rpmsg_static_queue_ctxt; + +/* Queue object static storage size in bytes, should be defined as (RL_BUFFER_COUNT*sizeof(rpmsg_queue_rx_cb_data_t)) + This macro helps the application to statically allocate the queue object static storage memory. Note, the + RL_BUFFER_COUNT is not applied for all instances when RL_ALLOW_CUSTOM_SHMEM_CONFIG is set to 1 ! */ +#define RL_ENV_QUEUE_STATIC_STORAGE_SIZE (RL_BUFFER_COUNT * sizeof(rpmsg_queue_rx_cb_data_t)) +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/environment/qnx/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/qnx/rpmsg_env_specific.h new file mode 100755 index 00000000..8d2a315c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/qnx/rpmsg_env_specific.h @@ -0,0 +1,44 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains QNX specific constructions. + * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include +#include "rpmsg_default_config.h" + +typedef struct +{ + uint32_t src; + void *data; + uint32_t len; +} rpmsg_queue_rx_cb_data_t; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +#include +#include + +typedef pthread_mutex_t LOCK_STATIC_CONTEXT; +typedef env_queue_t rpmsg_static_queue_ctxt; +/* Queue object static storage size in bytes, should be defined as (RL_BUFFER_COUNT*sizeof(rpmsg_queue_rx_cb_data_t)) + This macro helps the application to statically allocate the queue object static storage memory. Note, the + RL_BUFFER_COUNT is not applied for all instances when RL_ALLOW_CUSTOM_SHMEM_CONFIG is set to 1 ! 
*/ +#define RL_ENV_QUEUE_STATIC_STORAGE_SIZE (RL_BUFFER_COUNT * sizeof(rpmsg_queue_rx_cb_data_t)) +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/environment/threadx/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/threadx/rpmsg_env_specific.h new file mode 100755 index 00000000..11779350 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/threadx/rpmsg_env_specific.h @@ -0,0 +1,44 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains ThreadX specific constructions. + * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include +#include "rpmsg_default_config.h" + +typedef struct +{ + uint32_t src; + void *data; + uint32_t len; +} rpmsg_queue_rx_cb_data_t; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +#include "tx_api.h" +#include "tx_event_flags.h" + +typedef TX_SEMAPHORE LOCK_STATIC_CONTEXT; +typedef TX_QUEUE rpmsg_static_queue_ctxt; +/* Queue object static storage size in bytes, should be defined as (RL_BUFFER_COUNT*sizeof(rpmsg_queue_rx_cb_data_t)) + This macro helps the application to statically allocate the queue object static storage memory. Note, the + RL_BUFFER_COUNT is not applied for all instances when RL_ALLOW_CUSTOM_SHMEM_CONFIG is set to 1 ! 
*/ +#define RL_ENV_QUEUE_STATIC_STORAGE_SIZE (RL_BUFFER_COUNT * sizeof(rpmsg_queue_rx_cb_data_t)) +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/environment/xos/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/xos/rpmsg_env_specific.h new file mode 100755 index 00000000..fc8d318d --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/xos/rpmsg_env_specific.h @@ -0,0 +1,43 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains XOS specific constructions. + * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include +#include "rpmsg_default_config.h" + +typedef struct +{ + uint32_t src; + void *data; + uint32_t len; +} rpmsg_queue_rx_cb_data_t; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +#include + +typedef XosSem LOCK_STATIC_CONTEXT; +typedef void rpmsg_static_queue_ctxt; +/* Queue object static storage size in bytes, should be defined as (RL_BUFFER_COUNT*sizeof(rpmsg_queue_rx_cb_data_t)) + This macro helps the application to statically allocate the queue object static storage memory. Note, the + RL_BUFFER_COUNT is not applied for all instances when RL_ALLOW_CUSTOM_SHMEM_CONFIG is set to 1 ! 
*/ +#define RL_ENV_QUEUE_STATIC_STORAGE_SIZE (XOS_MSGQ_SIZE(RL_BUFFER_COUNT, sizeof(rpmsg_queue_rx_cb_data_t))) +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/environment/zephyr/rpmsg_env_specific.h b/Middlewares/Third_Party/rpmsg/include/environment/zephyr/rpmsg_env_specific.h new file mode 100755 index 00000000..c4d60323 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/environment/zephyr/rpmsg_env_specific.h @@ -0,0 +1,35 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_specific.h + * + * DESCRIPTION + * + * This file contains Zephyr specific constructions. + * + **************************************************************************/ +#ifndef RPMSG_ENV_SPECIFIC_H_ +#define RPMSG_ENV_SPECIFIC_H_ + +#include "rpmsg_default_config.h" + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +#include + +typedef k_sem LOCK_STATIC_CONTEXT; +typedef k_msgq rpmsg_static_queue_ctxt; +/* Queue object static storage size in bytes, should be defined as (RL_BUFFER_COUNT*sizeof(rpmsg_queue_rx_cb_data_t)) + This macro helps the application to statically allocate the queue object static storage memory. Note, the + RL_BUFFER_COUNT is not applied for all instances when RL_ALLOW_CUSTOM_SHMEM_CONFIG is set to 1 ! */ +#define RL_ENV_QUEUE_STATIC_STORAGE_SIZE (RL_BUFFER_COUNT * sizeof(rpmsg_queue_rx_cb_data_t)) +#endif + +#endif /* RPMSG_ENV_SPECIFIC_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/llist.h b/Middlewares/Third_Party/rpmsg/include/llist.h new file mode 100755 index 00000000..a4282522 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/llist.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright 2019 NXP + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of Mentor Graphics Corporation nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * llist.h + * + * COMPONENT + * + * OpenAMP stack. + * + * DESCRIPTION + * + * Header file for linked list service. 
+ * + **************************************************************************/ + +#ifndef LLIST_H_ +#define LLIST_H_ + +#include + +struct llist +{ + void *data; + uint32_t attr; + struct llist *next; + struct llist *prev; +}; + +void add_to_list(struct llist **head, struct llist *node); +void remove_from_list(struct llist **head, struct llist *node); + +#endif /* LLIST_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx6sx_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx6sx_m4/rpmsg_platform.h new file mode 100755 index 00000000..e09a8cd4 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx6sx_m4/rpmsg_platform.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX6SX_M4_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform 
low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); +void rpmsg_handler(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx7d_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx7d_m4/rpmsg_platform.h new file mode 100755 index 00000000..9febe559 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx7d_m4/rpmsg_platform.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX7D_M4_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); 
+int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); +void rpmsg_handler(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx7ulp_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx7ulp_m4/rpmsg_platform.h new file mode 100755 index 00000000..0edf151c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx7ulp_m4/rpmsg_platform.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX7ULP_M4_SRTM_LINK_ID (0U) +#define RL_PLATFORM_IMX7ULP_M4_USER_LINK_ID (1U) +#define RL_PLATFORM_HIGHEST_LINK_ID (1U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8mm_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx8mm_m4/rpmsg_platform.h new file mode 100755 index 00000000..5e7d36f9 --- /dev/null +++ 
b/Middlewares/Third_Party/rpmsg/include/platform/imx8mm_m4/rpmsg_platform.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX8MM_M4_USER_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (15U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8mn_m7/rpmsg_platform.h 
b/Middlewares/Third_Party/rpmsg/include/platform/imx8mn_m7/rpmsg_platform.h new file mode 100755 index 00000000..708766ed --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx8mn_m7/rpmsg_platform.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX8MN_M7_USER_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (15U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t 
platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8mp_m7/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx8mp_m7/rpmsg_platform.h new file mode 100755 index 00000000..80ec7b9c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx8mp_m7/rpmsg_platform.h @@ -0,0 +1,60 @@ +/* + * Copyright 2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX8MP_M7_USER_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (15U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + 
+/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8mq_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx8mq_m4/rpmsg_platform.h new file mode 100755 index 00000000..9b21b2d3 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx8mq_m4/rpmsg_platform.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX8MQ_M4_USER_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (15U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void 
platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8qm_m4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx8qm_m4/rpmsg_platform.h new file mode 100755 index 00000000..b633cdcd --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx8qm_m4/rpmsg_platform.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +/* VQ_ID in 8QM is defined as follows: + * com_id: [3:3] communication ID, used to identify the MU instance. + * vring_id: [2:1] vring ID, used to identify the vring. + * q_id: [0:0] queue ID, used to identify the tvq or rvq. 
+ * com_id + vring_id = link_id + */ + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(vq_id) ((vq_id) >> 1U) +#define RL_GET_COM_ID(vq_id) ((vq_id) >> 3U) +#define RL_GET_Q_ID(vq_id) ((vq_id)&0x1U) + +#define RL_GEN_LINK_ID(com_id, vring_id) (((com_id) << 2U) | (vring_id)) +#define RL_GEN_MU_MSG(vq_id) (uint32_t)(((vq_id)&0x7U) << 16U) /* com_id is discarded in msg */ + +#define RL_PLATFORM_IMX8QM_M4_A_COM_ID (0U) +#define RL_PLATFORM_IMX8QM_M4_M4_COM_ID (1U) + +#define RL_PLATFORM_IMX8QM_M4_SRTM_VRING_ID (0U) +#define RL_PLATFORM_IMX8QM_M4_USER_VRING_ID (1U) + +#define RL_PLATFORM_HIGHEST_LINK_ID RL_GEN_LINK_ID(RL_PLATFORM_IMX8QM_M4_M4_COM_ID, RL_PLATFORM_IMX8QM_M4_USER_VRING_ID) + +#define RL_PLATFORM_IMX8QM_M4_A_SRTM_LINK_ID \ + RL_GEN_LINK_ID(RL_PLATFORM_IMX8QM_M4_A_COM_ID, RL_PLATFORM_IMX8QM_M4_SRTM_VRING_ID) +#define RL_PLATFORM_IMX8QM_M4_A_USER_LINK_ID \ + RL_GEN_LINK_ID(RL_PLATFORM_IMX8QM_M4_A_COM_ID, RL_PLATFORM_IMX8QM_M4_USER_VRING_ID) +#define RL_PLATFORM_IMX8QM_M4_M4_SRTM_LINK_ID \ + RL_GEN_LINK_ID(RL_PLATFORM_IMX8QM_M4_M4_COM_ID, RL_PLATFORM_IMX8QM_M4_SRTM_VRING_ID) +#define RL_PLATFORM_IMX8QM_M4_M4_USER_LINK_ID \ + RL_GEN_LINK_ID(RL_PLATFORM_IMX8QM_M4_M4_COM_ID, RL_PLATFORM_IMX8QM_M4_USER_VRING_ID) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t 
platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#if defined(MIMX8QM_CM4_CORE0) +int32_t LSIO_MU5_INT_B_IRQHandler(void); +int32_t LSIO_MU7_INT_A_IRQHandler(void); +#elif defined(MIMX8QM_CM4_CORE1) +int32_t LSIO_MU6_INT_B_IRQHandler(void); +int32_t LSIO_MU7_INT_B_IRQHandler(void); +#endif + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imx8qx_cm4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imx8qx_cm4/rpmsg_platform.h new file mode 100755 index 00000000..a669af13 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imx8qx_cm4/rpmsg_platform.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (1) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x8000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMX8QX_CM4_SRTM_LINK_ID (0U) +#define RL_PLATFORM_IMX8QX_CM4_USER_LINK_ID (1U) +#define RL_PLATFORM_HIGHEST_LINK_ID (15U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t 
platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +int32_t LSIO_MU5_INT_B_IRQHandler(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imxrt1160/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imxrt1160/rpmsg_platform.h new file mode 100755 index 00000000..926a83e0 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt1160/rpmsg_platform.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because imxrt1160 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT1160_M7_M4_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imxrt1170/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imxrt1170/rpmsg_platform.h new file mode 100755 index 00000000..297d3820 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt1170/rpmsg_platform.h @@ -0,0 +1,59 @@ +/* + 
* Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because imxrt1170 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT1170_M7_M4_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_fusionf1/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_fusionf1/rpmsg_platform.h new file mode 100755 index 
00000000..3e2110ec --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_fusionf1/rpmsg_platform.h @@ -0,0 +1,58 @@ +/* + * Copyright 2019-2020 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because RT500 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT500_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (1U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_m33/rpmsg_platform.h 
b/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_m33/rpmsg_platform.h new file mode 100755 index 00000000..dd94dd2d --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt500_m33/rpmsg_platform.h @@ -0,0 +1,58 @@ +/* + * Copyright 2019-2020 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because RT500 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT500_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git 
a/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_hifi4/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_hifi4/rpmsg_platform.h new file mode 100755 index 00000000..fbe471e5 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_hifi4/rpmsg_platform.h @@ -0,0 +1,59 @@ +/* + * Copyright 2019-2020 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because RT600 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT600_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t 
platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_m33/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_m33/rpmsg_platform.h new file mode 100755 index 00000000..d10a548f --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/imxrt600_m33/rpmsg_platform.h @@ -0,0 +1,58 @@ +/* + * Copyright 2019-2020 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because RT600 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_IMXRT600_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform 
init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv/rpmsg_platform.h new file mode 100755 index 00000000..31f6cefa --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv/rpmsg_platform.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Ingenic Semiconductor, Inc. + * Copyright 2005-2022 Ingenic + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* RPMSG MU channel index */ +#define RPMSG_MU_CHANNEL (0) + +/* + * Linux requires the ALIGN to 0x1000(4KB) instead of 0x80 + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x1000U) +#endif + +/* contains pool of descriptors and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x1000UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_INGENIC_RISCV_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void 
platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); +void rpmsg_handler(uint32_t msg); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/k32l3a6/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/k32l3a6/rpmsg_platform.h new file mode 100755 index 00000000..5087db9d --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/k32l3a6/rpmsg_platform.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because k32l3a6 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_K32L3A60_M4_M0_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* 
platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/lpc5410x/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/lpc5410x/rpmsg_platform.h new file mode 100755 index 00000000..33478c72 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/lpc5410x/rpmsg_platform.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because LPC54102 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_LPC5410x_M4_M0_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + 
+/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/lpc5411x/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/lpc5411x/rpmsg_platform.h new file mode 100755 index 00000000..cf22d7d1 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/lpc5411x/rpmsg_platform.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because LPC5411x is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_LPC5411x_M4_M0_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t 
platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/platform/lpc55s69/rpmsg_platform.h b/Middlewares/Third_Party/rpmsg/include/platform/lpc55s69/rpmsg_platform.h new file mode 100755 index 00000000..8a31538c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/platform/lpc55s69/rpmsg_platform.h @@ -0,0 +1,59 @@ +/* + * Copyright 2018-2019 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RPMSG_PLATFORM_H_ +#define RPMSG_PLATFORM_H_ + +#include + +/* + * No need to align the VRING as defined in Linux because LPC55S69 is not intended + * to run the Linux + */ +#ifndef VRING_ALIGN +#define VRING_ALIGN (0x10U) +#endif + +/* contains pool of descriptos and two circular buffers */ +#ifndef VRING_SIZE +#define VRING_SIZE (0x400UL) +#endif + +/* size of shared memory + 2*VRING size */ +#define RL_VRING_OVERHEAD (2UL * VRING_SIZE) + +#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU)) +#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U) +#define RL_GET_Q_ID(id) ((id)&0x1U) + +#define RL_PLATFORM_LPC55S69_M33_M33_LINK_ID (0U) +#define RL_PLATFORM_HIGHEST_LINK_ID (0U) + +/* platform interrupt related functions */ +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data); +int32_t platform_deinit_interrupt(uint32_t vector_id); +int32_t platform_interrupt_enable(uint32_t vector_id); +int32_t platform_interrupt_disable(uint32_t vector_id); +int32_t platform_in_isr(void); +void platform_notify(uint32_t vector_id); + +/* platform low-level time-delay (busy loop) */ +void platform_time_delay(uint32_t num_msec); + +/* platform memory functions */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags); +void platform_cache_all_flush_invalidate(void); +void platform_cache_disable(void); +uint32_t platform_vatopa(void *addr); +void *platform_patova(uint32_t addr); + +/* platform init/deinit */ +int32_t platform_init(void); +int32_t platform_deinit(void); + +#endif /* RPMSG_PLATFORM_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_compiler.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_compiler.h new file mode 100755 index 00000000..065bda3c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_compiler.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * 
Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_compiler.h + * + * DESCRIPTION + * + * This file defines compiler-specific macros. 
+ * + ***************************************************************************/ +#ifndef RPMSG_COMPILER_H_ +#define RPMSG_COMPILER_H_ + +/* IAR ARM build tools */ +#if defined(__ICCARM__) + +#include + +#define MEM_BARRIER() __DSB() + +#ifndef RL_PACKED_BEGIN +#define RL_PACKED_BEGIN __packed +#endif + +#ifndef RL_PACKED_END +#define RL_PACKED_END +#endif + +/* ARM GCC */ +#elif defined(__CC_ARM) || (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) + +#if (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) +#include +#endif + +#define MEM_BARRIER() __schedule_barrier() + +#ifndef RL_PACKED_BEGIN +#define RL_PACKED_BEGIN _Pragma("pack(1U)") +#endif + +#ifndef RL_PACKED_END +#define RL_PACKED_END _Pragma("pack()") +#endif + +/* XCC HiFi4 */ +#elif defined(__XCC__) + +/* + * The XCC HiFi4 compiler is compatible with GNU compiler, with restrictions. + * For ARM __schedule_barrier, there's no identical intrinsic in HiFi4. + * A complete synchronization barrier would require initialize and wait ops. + * Here use NOP instead, similar to ARM __nop. + */ +#define MEM_BARRIER() __asm__ __volatile__("nop" : : : "memory") + +#ifndef RL_PACKED_BEGIN +#define RL_PACKED_BEGIN +#endif + +#ifndef RL_PACKED_END +#define RL_PACKED_END __attribute__((__packed__)) +#endif + +/* GNUC */ +#elif defined(__GNUC__) + +//#define MEM_BARRIER() __asm__ volatile("dsb" : : : "memory") +#define MEM_BARRIER() //__asm__ volatile("dsb" : : : "memory") + +#ifndef RL_PACKED_BEGIN +#define RL_PACKED_BEGIN +#endif + +#ifndef RL_PACKED_END +#define RL_PACKED_END __attribute__((__packed__)) +#endif + +#else +/* There is no default definition here to avoid wrong structures packing in case of not supported compiler */ +#error Please implement the structure packing macros for your compiler here! 
+#endif + +#endif /* RPMSG_COMPILER_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_default_config.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_default_config.h new file mode 100755 index 00000000..291a7100 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_default_config.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RPMSG_DEFAULT_CONFIG_H_ +#define RPMSG_DEFAULT_CONFIG_H_ + +#define RL_USE_CUSTOM_CONFIG (1) + +#if RL_USE_CUSTOM_CONFIG +#include "rpmsg_config.h" +#endif + +/*! + * @addtogroup config + * @{ + * @file + */ + +//! @name Configuration options +//@{ + +//! @def RL_MS_PER_INTERVAL +//! +//! Delay in milliseconds used in non-blocking API functions for polling. +//! The default value is 1. +#ifndef RL_MS_PER_INTERVAL +#define RL_MS_PER_INTERVAL (1) +#endif + +//! @def RL_ALLOW_CUSTOM_SHMEM_CONFIG +//! +//! This option allows to define custom shared memory configuration and replacing +//! the shared memory related global settings from rpmsg_config.h This is useful +//! when multiple instances are running in parallel but different shared memory +//! arrangement (vring size & alignment, buffers size & count) is required. Note, +//! that once enabled the platform_get_custom_shmem_config() function needs +//! to be implemented in platform layer. The default value is 0 (all RPMsg_Lite +//! instances use the same shared memory arrangement as defined by common config macros). +#ifndef RL_ALLOW_CUSTOM_SHMEM_CONFIG +#define RL_ALLOW_CUSTOM_SHMEM_CONFIG (0) +#endif + +#if !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)) +//! @def RL_BUFFER_PAYLOAD_SIZE +//! +//! Size of the buffer payload, it must be equal to (240, 496, 1008, ...) +//! [2^n - 16]. 
Ensure the same value is defined on both sides of rpmsg +//! communication. The default value is 496U. +#ifndef RL_BUFFER_PAYLOAD_SIZE +#define RL_BUFFER_PAYLOAD_SIZE (496U) +#endif + +//! @def RL_BUFFER_COUNT +//! +//! Number of the buffers, it must be power of two (2, 4, ...). +//! The default value is 2U. +//! Note this value defines the buffer count for one direction of the rpmsg +//! communication only, i.e. if the default value of 2 is used +//! in rpmsg_config.h files for the master and the remote side, 4 buffers +//! in total are created in the shared memory. +#ifndef RL_BUFFER_COUNT +#define RL_BUFFER_COUNT (2U) +#endif + +#else +//! Define the buffer payload and count per different link IDs (rpmsg_lite instance) when RL_ALLOW_CUSTOM_SHMEM_CONFIG +//! is set. +//! Refer to the rpmsg_plaform.h for the used link IDs. +#ifndef RL_BUFFER_PAYLOAD_SIZE +#define RL_BUFFER_PAYLOAD_SIZE(link_id) (496U) +#endif + +#ifndef RL_BUFFER_COUNT +#define RL_BUFFER_COUNT(link_id) (((link_id) == 0U) ? 256U : 2U) +#endif +#endif /* !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1))*/ + +//! @def RL_API_HAS_ZEROCOPY +//! +//! Zero-copy API functions enabled/disabled. +//! The default value is 1 (enabled). +#ifndef RL_API_HAS_ZEROCOPY +#define RL_API_HAS_ZEROCOPY (1) +#endif + +//! @def RL_USE_STATIC_API +//! +//! Static API functions (no dynamic allocation) enabled/disabled. +//! The default value is 0 (static API disabled). +#ifndef RL_USE_STATIC_API +#define RL_USE_STATIC_API (0) +#endif + +//! @def RL_CLEAR_USED_BUFFERS +//! +//! Clearing used buffers before returning back to the pool of free buffers +//! enabled/disabled. +//! The default value is 0 (disabled). +#ifndef RL_CLEAR_USED_BUFFERS +#define RL_CLEAR_USED_BUFFERS (0) +#endif + +//! @def RL_USE_MCMGR_IPC_ISR_HANDLER +//! +//! When enabled IPC interrupts are managed by the Multicore Manager (IPC +//! interrupts router), when disabled RPMsg-Lite manages IPC interrupts +//! by itself. +//! 
The default value is 0 (no MCMGR IPC ISR handler used). +#ifndef RL_USE_MCMGR_IPC_ISR_HANDLER +#define RL_USE_MCMGR_IPC_ISR_HANDLER (0) +#endif + +//! @def RL_USE_ENVIRONMENT_CONTEXT +//! +//! When enabled the environment layer uses its own context. +//! Added for QNX port mainly, but can be used if required. +//! The default value is 0 (no context, saves some RAM). +#ifndef RL_USE_ENVIRONMENT_CONTEXT +#define RL_USE_ENVIRONMENT_CONTEXT (0) +#endif + +//! @def RL_DEBUG_CHECK_BUFFERS +//! +//! Do not use in RPMsg-Lite to Linux configuration +#ifndef RL_DEBUG_CHECK_BUFFERS +#define RL_DEBUG_CHECK_BUFFERS (0) +#endif + +//! @def RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION +//! +//! When enabled the opposite side is notified each time received buffers +//! are consumed and put into the queue of available buffers. +//! Enable this option in RPMsg-Lite to Linux configuration to allow unblocking +//! of the Linux blocking send. +//! The default value is 0 (RPMsg-Lite to RPMsg-Lite communication). +#ifndef RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION +#define RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION (0) +#endif + +//! @def RL_HANG +//! +//! Default implementation of hang assert function +static inline void RL_HANG(void) +{ + for (;;) + { + } +} + +//! @def RL_ASSERT +//! +//! Assert implementation. +#ifndef RL_ASSERT +#define RL_ASSERT_BOOL(b) \ + do \ + { \ + if (!(b)) \ + { \ + RL_HANG(); \ + } \ + } while (0 == 1); +#define RL_ASSERT(x) RL_ASSERT_BOOL((int32_t)(x) != 0) + +#endif +//@} + +#endif /* RPMSG_DEFAULT_CONFIG_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_env.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_env.h new file mode 100755 index 00000000..5c10fa67 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_env.h @@ -0,0 +1,627 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * Copyright 2021 ACRIOS Systems s.r.o. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env.h + * + * COMPONENT + * + * OpenAMP stack. + * + * DESCRIPTION + * + * This file defines abstraction layer for OpenAMP stack. The implementor + * must provide definition of all the functions. 
+ * + * DATA STRUCTURES + * + * none + * + * FUNCTIONS + * + * env_allocate_memory + * env_free_memory + * env_memset + * env_memcpy + * env_strncpy + * env_print + * env_map_vatopa + * env_map_patova + * env_mb + * env_rmb + * env_wmb + * env_create_mutex + * env_delete_mutex + * env_lock_mutex + * env_unlock_mutex + * env_sleep_msec + * env_disable_interrupt + * env_enable_interrupt + * env_create_queue + * env_delete_queue + * env_put_queue + * env_get_queue + * env_wait_for_link_up + * env_tx_callback + * + **************************************************************************/ +#ifndef RPMSG_ENV_H_ +#define RPMSG_ENV_H_ + +#include +#include "rpmsg_default_config.h" +#include "rpmsg_env_specific.h" +#include "rpmsg_platform.h" + +/*! + * env_init + * + * Initializes OS/BM environment. + * + * @param env_context Pointer to preallocated environment context data + * @param env_init_data Initialization data for the environment layer + * + * @returns - execution status + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +int32_t env_init(void **env_context, void *env_init_data); +#else +int32_t env_init(void); +#endif + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. + * + * @param env_context Pointer to environment context data + * + * @returns - execution status + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +int32_t env_deinit(void *env_context); +#else +int32_t env_deinit(void); +#endif + +/*! + * ------------------------------------------------------------------------- + * + * Dynamic memory management functions. The parameters + * are similar to standard c functions. + * + *------------------------------------------------------------------------- + **/ + +/*! + * env_allocate_memory + * + * Allocates memory with the given size. + * + * @param size - size of memory to allocate + * + * @return - pointer to allocated memory + */ +void *env_allocate_memory(uint32_t size); + +/*! 
+ * env_free_memory + * + * Frees memory pointed by the given parameter. + * + * @param ptr - pointer to memory to free + */ +void env_free_memory(void *ptr); + +/*! + * ------------------------------------------------------------------------- + * + * RTL Functions + * + *------------------------------------------------------------------------- + */ + +void env_memset(void *ptr, int32_t value, uint32_t size); +void env_memcpy(void *dst, void const *src, uint32_t len); +int32_t env_strcmp(const char *dst, const char *src); +void env_strncpy(char *dest, const char *src, uint32_t len); +int32_t env_strncmp(char *dest, const char *src, uint32_t len); +#ifdef MCUXPRESSO_SDK +/* MCUXpresso_SDK's PRINTF used in SDK examples */ +#include "fsl_debug_console.h" +#if defined SDK_DEBUGCONSOLE && (SDK_DEBUGCONSOLE != DEBUGCONSOLE_DISABLE) +#define env_print(...) (void)PRINTF(__VA_ARGS__) +#else +#define env_print(...) +#endif +#else +/* When RPMsg_Lite being used outside of MCUXpresso_SDK use your own env_print + implemenetation to avoid conflict with Misra 21.6 rule */ +//#include +#define env_print(...) (void)printf(__VA_ARGS__) +#endif /* MCUXPRESSO_SDK */ + +/*! + *----------------------------------------------------------------------------- + * + * Functions to convert physical address to virtual address and vice versa. + * + *----------------------------------------------------------------------------- + */ + +/*! + * env_map_vatopa + * + * Converts logical address to physical address + * + * @param env Pointer to environment context data + * @param address Pointer to logical address + * + * @return - physical address + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +uint32_t env_map_vatopa(void *env, void *address); +#else +uint32_t env_map_vatopa(void *address); +#endif + +/*! 
+ * env_map_patova + * + * Converts physical address to logical address + * + * @param env_context Pointer to environment context data + * @param address Pointer to physical address + * + * @return - logical address + * + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void *env_map_patova(void *env, uint32_t address); +#else +void *env_map_patova(uint32_t address); +#endif + +/*! + *----------------------------------------------------------------------------- + * + * Abstractions for memory barrier instructions. + * + *----------------------------------------------------------------------------- + */ + +/*! + * env_mb + * + * Inserts memory barrier. + */ + +void env_mb(void); + +/*! + * env_rmb + * + * Inserts read memory barrier + */ + +void env_rmb(void); + +/*! + * env_wmb + * + * Inserts write memory barrier + */ + +void env_wmb(void); + +/*! + *----------------------------------------------------------------------------- + * + * Abstractions for OS lock primitives. + * + *----------------------------------------------------------------------------- + */ + +/*! + * env_create_mutex + * + * Creates a mutex with given initial count. + * + * @param lock - pointer to created mutex + * @param count - initial count 0 or 1 + * @param context - context for mutex + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context); +#else +int32_t env_create_mutex(void **lock, int32_t count); +#endif + +/*! + * env_delete_mutex + * + * Deletes the given lock. + * + * @param lock - mutex to delete + */ + +void env_delete_mutex(void *lock); + +/*! + * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + * + * @param lock - mutex to lock + * + */ + +void env_lock_mutex(void *lock); + +/*! + * env_unlock_mutex + * + * Releases the given lock. 
+ * + * @param lock - mutex to unlock + */ + +void env_unlock_mutex(void *lock); + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + * + * @param lock - pointer to created sync lock object + * @param state - initial state , lock or unlocked + * @param context - context for lock + * + * @returns - status of function execution + */ +#define LOCKED 0 +#define UNLOCKED 1 + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context); +#else +int32_t env_create_sync_lock(void **lock, int32_t state); +#endif + +/*! + * env_create_sync_lock + * + * Deletes given sync lock object. + * + * @param lock - sync lock to delete. + * + */ + +void env_delete_sync_lock(void *lock); + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the sync lock. + * + * @param lock - sync lock to acquire. + */ +void env_acquire_sync_lock(void *lock); + +/*! + * env_release_sync_lock + * + * Releases synchronization lock. + * + * @param lock - sync lock to release. + */ +void env_release_sync_lock(void *lock); + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time in msecs. + * + * @param num_msec - delay in msecs + */ +void env_sleep_msec(uint32_t num_msec); + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. + * + * @param env Pointer to environment context data + * @param vector_id Virtual interrupt vector number + * @param data Interrupt handler data (virtqueue) + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void env_register_isr(void *env, uint32_t vector_id, void *data); +#else +void env_register_isr(uint32_t vector_id, void *data); +#endif + +/*! + * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. 
+ * + * @param env Pointer to environment context data + * @param vector_id Virtual interrupt vector number + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void env_unregister_isr(void *env, uint32_t vector_id); +#else +void env_unregister_isr(uint32_t vector_id); +#endif + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param env Pointer to environment context data + * @param vector_id Virtual interrupt vector number + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void env_enable_interrupt(void *env, uint32_t vector_id); +#else +void env_enable_interrupt(uint32_t vector_id); +#endif + +/*! + * env_disable_interrupt + * + * Disables the given interrupt. + * + * @param env Pointer to environment context data + * @param vector_id Virtual interrupt vector number + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void env_disable_interrupt(void *env, uint32_t vector_id); +#else +void env_disable_interrupt(uint32_t vector_id); +#endif + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. 
+ * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + * + * Currently only first byte of flag parameter is used and bits mapping is defined as follow; + * + * Cache bits + * 0x0000_0001 = No cache + * 0x0000_0010 = Write back + * 0x0000_0100 = Write through + * 0x0000_x000 = Not used + * + * Memory types + * + * 0x0001_xxxx = Memory Mapped + * 0x0010_xxxx = IO Mapped + * 0x0100_xxxx = Shared + * 0x1000_xxxx = TLB + */ + +/* Macros for caching scheme used by the shared memory */ +#define UNCACHED (1 << 0) +#define WB_CACHE (1 << 1) +#define WT_CACHE (1 << 2) + +/* Memory Types */ +#define MEM_MAPPED (1 << 4) +#define IO_MAPPED (1 << 5) +#define SHARED_MEM (1 << 6) +#define TLB_MEM (1 << 7) + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags); + +/*! + * env_get_timestamp + * + * Returns a 64 bit time stamp. + * + * + */ +uint64_t env_get_timestamp(void); + +/*! + * env_disable_cache + * + * Disables system caches. + * + */ + +void env_disable_cache(void); + +typedef void LOCK; + +/*! + * env_create_queue + * + * Creates a message queue. + * + * @param queue Pointer to created queue + * @param length Maximum number of elements in the queue + * @param item_size Queue element size in bytes + * @param queue_static_storage Pointer to queue static storage buffer + * @param queue_static_context Pointer to queue static context + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_queue(void **queue, + int32_t length, + int32_t element_size, + uint8_t *queue_static_storage, + rpmsg_static_queue_ctxt *queue_static_context); +#else +int32_t env_create_queue(void **queue, int32_t length, int32_t element_size); +#endif + +/*! + * env_delete_queue + * + * Deletes the message queue. 
+ * + * @param queue Queue to delete + */ + +void env_delete_queue(void *queue); + +/*! + * env_put_queue + * + * Put an element in a queue. + * + * @param queue Queue to put element in + * @param msg Pointer to the message to be put into the queue + * @param timeout_ms Timeout in ms + * + * @return - status of function execution + */ + +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms); + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue Queue to get element from + * @param msg Pointer to a memory to save the message + * @param timeout_ms Timeout in ms + * + * @return - status of function execution + */ + +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms); + +/*! + * env_get_current_queue_size + * + * Get current queue size. + * + * @param queue Queue pointer + * + * @return - Number of queued items in the queue + */ + +int32_t env_get_current_queue_size(void *queue); + +/*! + * env_isr + * + * Invoke RPMSG/IRQ callback + * + * @param env Pointer to environment context data + * @param vector RPMSG IRQ vector ID. + */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +void env_isr(void *env, uint32_t vector); +#else +void env_isr(uint32_t vector); +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +/*! + * env_get_platform_context + * + * Get the platform layer context from the environment platform context + * + * @param env Pointer to environment context data + * + * @return Pointer to platform context data + */ +void *env_get_platform_context(void *env_context); + +/*! + * env_init_interrupt + * + * Initialize the ISR data for given virtqueue interrupt + * + * @param env Pointer to environment context data + * @param vq_id Virtqueue ID + * @param isr_data Pointer to initial ISR data + * + * @return Execution status, 0 on success + */ +int32_t env_init_interrupt(void *env, int32_t vq_id, void *isr_data); + +/*! 
+ * env_deinit_interrupt + * + * Deinitialize the ISR data for given virtqueue interrupt + * + * @param env Pointer to environment context data + * @param vq_id Virtqueue ID + * + * @return Execution status, 0 on success + */ +int32_t env_deinit_interrupt(void *env, int32_t vq_id); +#endif + +/*! + * env_wait_for_link_up + * + * Env. specific implementation of rpmsg_lite_wait_for_link_up function with the usage + * of RTOS sync. primitives to avoid busy loop. Returns once the link is up. + * + * @param link_state Pointer to the link_state parameter of the rpmsg_lite_instance structure + * @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id); + +/*! + * env_tx_callback + * + * Called from rpmsg_lite_tx_callback() to allow unblocking of env_wait_for_link_up() + * + * @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h + */ +void env_tx_callback(uint32_t link_id); + +#endif /* RPMSG_ENV_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_lite.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_lite.h new file mode 100755 index 00000000..c508e470 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_lite.h @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RPMSG_LITE_H_ +#define RPMSG_LITE_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include +#include "rpmsg_compiler.h" +#include "virtqueue.h" +#include "rpmsg_env.h" +#include "llist.h" +#include "rpmsg_default_config.h" + +//! @addtogroup rpmsg_lite +//! @{ + +/******************************************************************************* + * Definitions + ******************************************************************************/ + +#define RL_VERSION "4.0.0" /*!< Current RPMsg Lite version */ + +/* Shared memory "allocator" parameters */ +#define RL_WORD_SIZE (sizeof(uint32_t)) +#define RL_WORD_ALIGN_UP(a) \ + (((((uint32_t)(a)) & (RL_WORD_SIZE - 1U)) != 0U) ? 
((((uint32_t)(a)) & (~(RL_WORD_SIZE - 1U))) + 4U) : \ + ((uint32_t)(a))) +#define RL_WORD_ALIGN_DOWN(a) \ + (((((uint32_t)(a)) & (RL_WORD_SIZE - 1U)) != 0U) ? (((uint32_t)(a)) & (~(RL_WORD_SIZE - 1U))) : ((uint32_t)(a))) + +/* Definitions for device types , null pointer, etc.*/ +#define RL_SUCCESS (0) +#define RL_NULL ((void *)0) +#define RL_REMOTE (0) +#define RL_MASTER (1) +#define RL_TRUE (1UL) +#define RL_FALSE (0UL) +#define RL_ADDR_ANY (0xFFFFFFFFU) +#define RL_RELEASE (0) +#define RL_HOLD (1) +#define RL_DONT_BLOCK (0) +#define RL_BLOCK (0xFFFFFFFFU) + +/* Error macros. */ +#define RL_ERRORS_BASE (-5000) +#define RL_ERR_NO_MEM (RL_ERRORS_BASE - 1) +#define RL_ERR_BUFF_SIZE (RL_ERRORS_BASE - 2) +#define RL_ERR_PARAM (RL_ERRORS_BASE - 3) +#define RL_ERR_DEV_ID (RL_ERRORS_BASE - 4) +#define RL_ERR_MAX_VQ (RL_ERRORS_BASE - 5) +#define RL_ERR_NO_BUFF (RL_ERRORS_BASE - 6) +#define RL_NOT_READY (RL_ERRORS_BASE - 7) +#define RL_ALREADY_DONE (RL_ERRORS_BASE - 8) + +/* Init flags */ +#define RL_NO_FLAGS (0) + +/*! \typedef rl_ept_rx_cb_t + \brief Receive callback function type. +*/ +typedef int32_t (*rl_ept_rx_cb_t)(void *payload, uint32_t payload_len, uint32_t src, void *priv); + +/*! + * RPMsg Lite Endpoint structure + */ +struct rpmsg_lite_endpoint +{ + uint32_t addr; /*!< endpoint address */ + rl_ept_rx_cb_t rx_cb; /*!< ISR callback function */ + void *rx_cb_data; /*!< ISR callback data */ + void *rfu; /*!< reserved for future usage */ + /* 16 bytes aligned on 32bit architecture */ +}; + +/*! + * RPMsg Lite Endpoint static context + */ +struct rpmsg_lite_ept_static_context +{ + struct rpmsg_lite_endpoint ept; /*!< memory for endpoint structure */ + struct llist node; /*!< memory for linked list node structure */ +}; + +/*! + * Structure describing the local instance + * of RPMSG lite communication stack and + * holds all runtime variables needed internally + * by the stack. 
+ */ +struct rpmsg_lite_instance +{ + struct virtqueue *rvq; /*!< receive virtqueue */ + struct virtqueue *tvq; /*!< transmit virtqueue */ + struct llist *rl_endpoints; /*!< linked list of endpoints */ + LOCK *lock; /*!< local RPMsg Lite mutex lock */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + LOCK_STATIC_CONTEXT lock_static_ctxt; /*!< Static context for lock object creation */ +#endif + uint32_t link_state; /*!< state of the link, up/down*/ + char *sh_mem_base; /*!< base address of the shared memory */ + uint32_t sh_mem_remaining; /*!< amount of remaining unused buffers in shared memory */ + uint32_t sh_mem_total; /*!< total amount of buffers in shared memory */ + struct virtqueue_ops const *vq_ops; /*!< ops functions table pointer */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + void *env; /*!< pointer to the environment layer context */ +#endif + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + struct vq_static_context vq_ctxt[2]; +#endif + uint32_t link_id; /*!< linkID of this rpmsg_lite instance */ +}; + +/******************************************************************************* + * API + ******************************************************************************/ + +/* Exported API functions */ + +/*! + * @brief Initializes the RPMsg-Lite communication stack. + * Must be called prior to any other RPMSG lite API. + * To be called by the master side. 
+ * + * @param shmem_addr Shared memory base used for this instance of RPMsg-Lite + * @param shmem_length Length of memory area given by previous parameter + * @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h + * @param init_flags Initialization flags + * @param env_cfg Initialization data for the environement RPMsg-Lite layer, used when + * the environment layer uses its own context (RL_USE_ENVIRONMENT_CONTEXT) + * @param static_context RPMsg-Lite preallocated context pointer, used in case of static api (RL_USE_STATIC_API) + * + * @return New RPMsg-Lite instance pointer or RL_NULL. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr, + size_t shmem_length, + uint32_t link_id, + uint32_t init_flags, + struct rpmsg_lite_instance *static_context); +#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +struct rpmsg_lite_instance *rpmsg_lite_master_init( + void *shmem_addr, size_t shmem_length, uint32_t link_id, uint32_t init_flags, void *env_cfg); +#else +struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr, + size_t shmem_length, + uint32_t link_id, + uint32_t init_flags); +#endif + +/** + * @brief Initializes the RPMsg-Lite communication stack. + * Must be called prior to any other RPMsg-Lite API. + * To be called by the remote side. + * + * @param shmem_addr Shared memory base used for this instance of RPMsg-Lite + * @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h + * @param init_flags Initialization flags + * @param env_cfg Initialization data for the environement RPMsg-Lite layer, used when + * the environment layer uses its own context (RL_USE_ENVIRONMENT_CONTEXT) + * @param static_context RPMsg-Lite preallocated context pointer, used in case of static api (RL_USE_STATIC_API) + * + * @return New RPMsg-Lite instance pointer or RL_NULL. 
+ * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, + uint32_t link_id, + uint32_t init_flags, + struct rpmsg_lite_instance *static_context); +#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, + uint32_t link_id, + uint32_t init_flags, + void *env_cfg); +#else +struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, uint32_t link_id, uint32_t init_flags); +#endif + +/*! + * + * @brief Deinitialized the RPMsg-Lite communication stack + * This function always succeeds. + * rpmsg_lite_init() can be called again after this + * function has been called. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * + * @return Status of function execution, RL_SUCCESS on success. + */ +int32_t rpmsg_lite_deinit(struct rpmsg_lite_instance *rpmsg_lite_dev); + +/*! + * @brief Create a new rpmsg endpoint, which can be used + * for communication. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param addr Desired address, RL_ADDR_ANY for automatic selection + * @param rx_cb Callback function called on receive + * @param rx_cb_data Callback data pointer, passed to rx_cb + * @param ept_context Endpoint preallocated context pointer, used in case of static api (RL_USE_STATIC_API) + * + * @return RL_NULL on error, new endpoint pointer on success. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, + uint32_t addr, + rl_ept_rx_cb_t rx_cb, + void *rx_cb_data, + struct rpmsg_lite_ept_static_context *ept_context); +#else +struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, + uint32_t addr, + rl_ept_rx_cb_t rx_cb, + void *rx_cb_data); +#endif + +/*! + * @brief This function deletes rpmsg endpoint and performs cleanup. 
+ * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param rl_ept Pointer to endpoint to destroy + * + */ +int32_t rpmsg_lite_destroy_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, struct rpmsg_lite_endpoint *rl_ept); + +/*! + * + * @brief Sends a message contained in data field of length size + * to the remote endpoint with address dst. + * ept->addr is used as source address in the rpmsg header + * of the message being sent. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param ept Sender endpoint + * @param dst Remote endpoint address + * @param data Payload buffer + * @param size Size of payload, in bytes + * @param timeout Timeout in ms, 0 if nonblocking + * + * @return Status of function execution, RL_SUCCESS on success. + * + */ +int32_t rpmsg_lite_send(struct rpmsg_lite_instance *rpmsg_lite_dev, + struct rpmsg_lite_endpoint *ept, + uint32_t dst, + char *data, + uint32_t size, + uint32_t timeout); + +/*! + * @brief Function to get the link state + * + * @param rpmsg_lite_dev RPMsg-Lite instance pointer + * + * @return RL_TRUE when link up, RL_FALSE when down. + * + */ +uint32_t rpmsg_lite_is_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev); + +/*! + * @brief Function to wait until the link is up. Returns + * once the link_state is set. + * + * @param rpmsg_lite_dev RPMsg-Lite instance pointer + */ +void rpmsg_lite_wait_for_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev); + +#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1) + +/*! + * @brief Releases the rx buffer for future reuse in vring. + * This API can be called at process context when the + * message in rx buffer is processed. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param rxbuf Rx buffer with message payload + * + * @return Status of function execution, RL_SUCCESS on success. + */ +int32_t rpmsg_lite_release_rx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf); + +/*! + * @brief Allocates the tx buffer for message payload. 
+ * + * This API can only be called at process context to get the tx buffer in vring. By this way, the + * application can directly put its message into the vring tx buffer without copy from an application buffer. + * It is the application responsibility to correctly fill the allocated tx buffer by data and passing correct + * parameters to the rpmsg_lite_send_nocopy() function to perform data no-copy-send mechanism. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param[in] size Pointer to store maximum payload size available + * @param[in] timeout Integer, wait upto timeout ms or not for buffer to become available + * + * @return The tx buffer address on success and RL_NULL on failure. + * + * @see rpmsg_lite_send_nocopy + */ +void *rpmsg_lite_alloc_tx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t *size, uint32_t timeout); + +/*! + * @brief Sends a message in tx buffer allocated by rpmsg_lite_alloc_tx_buffer() + * + * This function sends txbuf of length len to the remote dst address, + * and uses ept->addr as the source address. + * The application has to take the responsibility for: + * 1. tx buffer allocation (rpmsg_lite_alloc_tx_buffer()) + * 2. filling the data to be sent into the pre-allocated tx buffer + * 3. not exceeding the buffer size when filling the data + * 4. data cache coherency + * + * After the rpmsg_lite_send_nocopy() function is issued the tx buffer is no more owned + * by the sending task and must not be touched anymore unless the rpmsg_lite_send_nocopy() + * function fails and returns an error. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param[in] ept Sender endpoint pointer + * @param[in] dst Destination address + * @param[in] data TX buffer with message filled + * @param[in] size Length of payload + * + * @return 0 on success and an appropriate error value on failure. 
+ * + * @see rpmsg_lite_alloc_tx_buffer + */ +int32_t rpmsg_lite_send_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev, + struct rpmsg_lite_endpoint *ept, + uint32_t dst, + void *data, + uint32_t size); +#endif /* RL_API_HAS_ZEROCOPY */ + +//! @} + +#if defined(__cplusplus) +} +#endif + +#endif /* RPMSG_LITE_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_ns.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_ns.h new file mode 100755 index 00000000..47bb6d57 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_ns.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RPMSG_NS_H_ +#define RPMSG_NS_H_ + +#include "rpmsg_lite.h" + +//! @addtogroup rpmsg_ns +//! @{ + +#define RL_NS_EPT_ADDR (0x35u) + +/* Up to 32 flags available */ +enum rpmsg_ns_flags +{ + RL_NS_CREATE = 0, + RL_NS_DESTROY = 1, +}; + +/*! \typedef rpmsg_ns_new_ept_cb + \brief New endpoint NS callback function type. +*/ +typedef void (*rpmsg_ns_new_ept_cb)(uint32_t new_ept, const char *new_ept_name, uint32_t flags, void *user_data); + +struct rpmsg_ns_callback_data +{ + rpmsg_ns_new_ept_cb cb; + void *user_data; +}; + +struct rpmsg_ns_context +{ + struct rpmsg_lite_endpoint *ept; + struct rpmsg_ns_callback_data *cb_ctxt; +}; + +typedef struct rpmsg_ns_context *rpmsg_ns_handle; + +struct rpmsg_ns_static_context_container +{ + struct rpmsg_lite_ept_static_context ept_ctxt; + struct rpmsg_ns_callback_data cb_ctxt; + struct rpmsg_ns_context ns_ctxt; +}; + +typedef struct rpmsg_ns_static_context_container rpmsg_ns_static_context; + +#if defined(__cplusplus) +extern "C" { +#endif + +/******************************************************************************* + * API + ******************************************************************************/ + +/* Exported API functions */ + +/*! 
+ * @brief Registers application nameservice callback + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param app_cb Application nameservice callback + * @param user_data Application nameservice callback data + * + * @return RL_NULL on error, NameService handle on success. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, + rpmsg_ns_new_ept_cb app_cb, + void *user_data, + rpmsg_ns_static_context *ns_ept_ctxt); +#else +rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_new_ept_cb app_cb, void *user_data); +#endif /* RL_USE_STATIC_API */ + +/*! + * @brief Unregisters application nameservice callback and cleans up + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param handle NameService handle + * + * @return Status of function execution, RL_SUCCESS on success. + * + */ +int32_t rpmsg_ns_unbind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_handle handle); + +/*! + * @brief Sends name service announcement to remote device + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param new_ept New endpoint to announce + * @param ept_name Name for the announced endpoint + * @param flags Channel creation/deletion flags + * + * @return Status of function execution, RL_SUCCESS on success + * + */ +int32_t rpmsg_ns_announce(struct rpmsg_lite_instance *rpmsg_lite_dev, + struct rpmsg_lite_endpoint *new_ept, + const char *ept_name, + uint32_t flags); + +//! @} + +#if defined(__cplusplus) +} +#endif + +#endif /* RPMSG_NS_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/rpmsg_queue.h b/Middlewares/Third_Party/rpmsg/include/rpmsg_queue.h new file mode 100755 index 00000000..477b17e1 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/rpmsg_queue.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2016-2021 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RPMSG_QUEUE_H_ +#define RPMSG_QUEUE_H_ + +#include "rpmsg_lite.h" + +//! @addtogroup rpmsg_queue +//! @{ + +/*! \typedef rpmsg_queue_handle + \brief Rpmsg queue handle type. 
+*/ +typedef void *rpmsg_queue_handle; + +/* RL_API_HAS_ZEROCOPY has to be enabled for RPMsg Queue to work */ +#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1) + +/******************************************************************************* + * API + ******************************************************************************/ + +/* Exported API functions */ + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! + * @brief + * This callback needs to be registered with an endpoint + * + * @param payload Pointer to the buffer containing received data + * @param payload_len Size of data received, in bytes + * @param src Pointer to address of the endpoint from which data is received + * @param priv Private data provided during endpoint creation + * + * @return RL_HOLD or RL_RELEASE to release or hold the buffer in payload + */ +int32_t rpmsg_queue_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv); + +/*! + * @brief + * Create a RPMsg queue which can be used + * for blocking reception. + * + * @param rpmsg_lite_dev RPMsg Lite instance + * @param queue_storage RPMsg Lite queue static storage pointer + * @param queue_ctxt RPMsg Lite queue static context holder + * + * @return RPMsg queue handle or RL_NULL + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev, + uint8_t *queue_storage, + rpmsg_static_queue_ctxt *queue_ctxt); +#else +rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev); +#endif + +/*! + * @brief + * Destroy a queue and clean up. + * Do not destroy a queue which is registered with an active endpoint! + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param[in] q RPMsg queue handle to destroy + * + * @return Status of function execution + * + */ +int32_t rpmsg_queue_destroy(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_queue_handle q); + +/*! 
+ * @brief + * blocking receive function - blocking version of the received function that can be called from an RTOS task. + * The data is copied from the receive buffer into the user supplied buffer. + * + * This is the "receive with copy" version of the RPMsg receive function. This version is simple + * to use but it requires copying data from shared memory into the user space buffer. + * The user has no obligation or burden to manage the shared memory buffers. + * + * @param rpmsg_lite_dev RPMsg-Lite instance + * @param[in] q RPMsg queue handle to listen on + * @param[in] data Pointer to the user buffer the received data are copied to + * @param[out] len Pointer to an int variable that will contain the number of bytes actually copied into the + * buffer + * @param[in] maxlen Maximum number of bytes to copy (received buffer size) + * @param[out] src Pointer to address of the endpoint from which data is received + * @param[in] timeout Timeout, in milliseconds, to wait for a message. A value of 0 means don't wait (non-blocking + * call). + * A value of 0xffffffff means wait forever (blocking call). + * + * @return Status of function execution + * + * @see rpmsg_queue_recv_nocopy + */ +int32_t rpmsg_queue_recv(struct rpmsg_lite_instance *rpmsg_lite_dev, + rpmsg_queue_handle q, + uint32_t *src, + char *data, + uint32_t maxlen, + uint32_t *len, + uint32_t timeout); + +/*! + * @brief + * blocking receive function - blocking version of the received function that can be called from an RTOS task. + * The data is NOT copied into the user-app. buffer. + * + * This is the "zero-copy receive" version of the RPMsg receive function. No data is copied. + * Only the pointer to the data is returned. This version is fast, but it requires the user to manage + * buffer allocation. Specifically, the user must decide when a buffer is no longer in use and + * make the appropriate API call to free it, see rpmsg_queue_nocopy_free(). 
+ *
+ * @param rpmsg_lite_dev RPMsg Lite instance
+ * @param[in] q RPMsg queue handle to listen on
+ * @param[out] data Pointer to the RPMsg buffer of the shared memory where the received data is stored
+ * @param[out] len Pointer to an int variable that will contain the number of valid bytes in the RPMsg
+ * buffer
+ * @param[out] src Pointer to address of the endpoint from which data is received
+ * @param[in] timeout Timeout, in milliseconds, to wait for a message. A value of 0 means don't wait (non-blocking
+ * call).
+ * A value of 0xffffffff means wait forever (blocking call).
+ *
+ * @return Status of function execution.
+ *
+ * @see rpmsg_queue_nocopy_free
+ * @see rpmsg_queue_recv
+ */
+int32_t rpmsg_queue_recv_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
+                                rpmsg_queue_handle q,
+                                uint32_t *src,
+                                char **data,
+                                uint32_t *len,
+                                uint32_t timeout);
+
+/*!
+ * @brief This function frees a buffer previously returned by rpmsg_queue_recv_nocopy().
+ *
+ * Once the zero-copy mechanism of receiving data is used, this function
+ * has to be called to free a buffer and to make it available for the next data
+ * transfer.
+ *
+ * @param rpmsg_lite_dev RPMsg-Lite instance
+ * @param[in] data Pointer to the RPMsg buffer of the shared memory that has to be freed
+ *
+ * @return Status of function execution.
+ *
+ * @see rpmsg_queue_recv_nocopy
+ */
+int32_t rpmsg_queue_nocopy_free(struct rpmsg_lite_instance *rpmsg_lite_dev, void *data);
+
+/*!
+ * @brief This function returns the number of pending messages in the queue.
+ *
+ * @param[in] q RPMsg queue handle
+ *
+ * @return Number of pending messages in the queue.
+ */
+int32_t rpmsg_queue_get_current_size(rpmsg_queue_handle q);
+
+//! 
@} + +#if defined(__cplusplus) +} +#endif + +#endif /* RL_API_HAS_ZEROCOPY */ + +#endif /* RPMSG_QUEUE_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/include/virtio_ring.h b/Middlewares/Third_Party/rpmsg/include/virtio_ring.h new file mode 100755 index 00000000..7ad7ec5a --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/include/virtio_ring.h @@ -0,0 +1,168 @@ +/*- + * Copyright Rusty Russell IBM Corporation 2007. + * Copyright 2019 NXP + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef VIRTIO_RING_H +#define VIRTIO_RING_H + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1U +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2U +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4U + +/* The Host uses this in used->flags to advise the Guest: don't kick me + * when you add a buffer. It's unreliable, so it's simply an + * optimization. Guest will still kick if it's out of buffers. */ +#define VRING_USED_F_NO_NOTIFY 1U +/* The Guest uses this in avail->flags to advise the Host: don't + * interrupt me when you consume a buffer. It's unreliable, so it's + * simply an optimization. */ +#define VRING_AVAIL_F_NO_INTERRUPT 1U + +/* VirtIO ring descriptors: 16 bytes. + * These can chain together via "next". */ +struct vring_desc +{ + /* Address (guest-physical). */ + uint64_t addr; + /* Length. */ + uint32_t len; + /* The flags as indicated above. */ + uint16_t flags; + /* We chain unused descriptors via this, too. */ + uint16_t next; +}; + +struct vring_avail +{ + uint16_t flags; + uint16_t idx; + uint16_t ring[1]; +}; + +/* uint32_t is used here for ids for padding reasons. */ +struct vring_used_elem +{ + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to. 
*/ + uint32_t len; +}; + +struct vring_used +{ + uint16_t flags; + uint16_t idx; + struct vring_used_elem ring[1]; +}; + +struct vring +{ + uint32_t num; + + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; +}; + +/* The standard layout for the ring is a continuous chunk of memory which + * looks like this. We assume num is a power of 2. + * + * struct vring { + * # The actual descriptors (16 bytes each) + * struct vring_desc desc[num]; + * + * # A ring of available descriptor heads with free-running index. + * __u16 avail_flags; + * __u16 avail_idx; + * __u16 available[num]; + * __u16 used_event_idx; + * + * # Padding to the next align boundary. + * char pad[]; + * + * # A ring of used descriptor heads with free-running index. + * __u16 used_flags; + * __u16 used_idx; + * struct vring_used_elem used[num]; + * __u16 avail_event_idx; + * }; + * + * NOTE: for VirtIO PCI, align is 4096. + */ + +/* + * We publish the used event index at the end of the available ring, and vice + * versa. They are at the end for backwards compatibility. 
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) ((vr)->used->ring[(vr)->num].id)
+
+/* Total byte size of a vring holding 'num' descriptors, including the extra
+ * uint16_t event-index slots and the alignment padding inserted between the
+ * avail and used rings (layout described in the comment above). */
+static inline int32_t vring_size(uint32_t num, uint32_t align)
+{
+    uint32_t size;
+
+    size = num * sizeof(struct vring_desc);
+    size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)) + sizeof(uint16_t);
+    size = (size + align - 1UL) & ~(align - 1UL);
+    size += sizeof(struct vring_used) + (num * sizeof(struct vring_used_elem)) + sizeof(uint16_t);
+    return ((int32_t)size);
+}
+
+/* Carve the desc/avail/used pointers out of the contiguous memory block 'p'.
+ * NOTE(review): the (uint32_t) cast of the avail-ring address assumes a
+ * 32-bit address space (Cortex-M targets); it would truncate pointers on a
+ * 64-bit build -- confirm if this header is ever compiled for such a host. */
+static inline void vring_init(struct vring *vr, uint32_t num, uint8_t *p, uint32_t align)
+{
+    vr->num = num;
+    vr->desc = (struct vring_desc *)(void *)p;
+    vr->avail = (struct vring_avail *)(void *)(p + num * sizeof(struct vring_desc));
+    vr->used = (struct vring_used *)(((uint32_t)&vr->avail->ring[num] + align - 1UL) & ~(align - 1UL));
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ *
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int32_t vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+    /* All arithmetic is done in uint16_t so index wrap-around is handled. */
+    if ((uint16_t)(new_idx - event_idx - 1U) < (uint16_t)(new_idx - old))
+    {
+        return 1;
+    }
+    else
+    {
+        return 0;
+    }
+}
+#endif /* VIRTIO_RING_H */
diff --git a/Middlewares/Third_Party/rpmsg/include/virtqueue.h b/Middlewares/Third_Party/rpmsg/include/virtqueue.h
new file mode 100755
index 00000000..1eb0639a
--- /dev/null
+++ b/Middlewares/Third_Party/rpmsg/include/virtqueue.h
@@ -0,0 +1,252 @@
+#ifndef VIRTQUEUE_H_
+#define VIRTQUEUE_H_
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher
+ * Copyright (c) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. 
Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include "rpmsg_default_config.h" +typedef uint8_t boolean; + +#include "virtio_ring.h" +#include "llist.h" + +/*Error Codes*/ +#define VQ_ERROR_BASE (-3000) +#define ERROR_VRING_FULL (VQ_ERROR_BASE - 1) +#define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2) +#define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3) +#define ERROR_NO_MEM (VQ_ERROR_BASE - 4) +#define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5) +#define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6) +#define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7) +#define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8) + +#define VQUEUE_SUCCESS (0) +#define VQUEUE_DEBUG (false) + +/* This is temporary macro to replace C NULL support. + * At the moment all the RTL specific functions are present in env. + * */ +#define VQ_NULL ((void *)0) + +/* The maximum virtqueue size is 2^15. 
Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. This is used to verify we are correctly + * handling vq_free_cnt. + */ +#define VQ_RING_DESC_CHAIN_END (32768) +#define VIRTQUEUE_FLAG_INDIRECT (0x0001U) +#define VIRTQUEUE_FLAG_EVENT_IDX (0x0002U) +#define VIRTQUEUE_MAX_NAME_SZ (32) /* mind the alignment */ + +/* Support for indirect buffer descriptors. */ +#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28) + +/* Support to suppress interrupt until specific index is reached. */ +#define VIRTIO_RING_F_EVENT_IDX (1 << 29) + +/* + * Hint on how long the next interrupt should be postponed. This is + * only used when the EVENT_IDX feature is negotiated. + */ +typedef enum +{ + VQ_POSTPONE_SHORT, + VQ_POSTPONE_LONG, + VQ_POSTPONE_EMPTIED /* Until all available desc are used. */ +} vq_postpone_t; + +/* local virtqueue representation, not in shared memory */ +struct virtqueue +{ + /* 32bit aligned { */ + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + uint32_t vq_flags; + int32_t vq_alignment; + int32_t vq_ring_size; + void *vq_ring_mem; + void (*callback_fc)(struct virtqueue *vq); + void (*notify_fc)(struct virtqueue *vq); + int32_t vq_max_indirect_size; + int32_t vq_indirect_mem_size; + struct vring vq_ring; + /* } 32bit aligned */ + + /* 16bit aligned { */ + uint16_t vq_queue_index; + uint16_t vq_nentries; + uint16_t vq_free_cnt; + uint16_t vq_queued_cnt; + + /* + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + + /* + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. + */ + uint16_t vq_used_cons_idx; + + /* + * Last consumed descriptor in the available table - + * used by the consumer side. 
+ */ + uint16_t vq_available_idx; + /* } 16bit aligned */ + + boolean avail_read; /* 8bit wide */ + boolean avail_write; /* 8bit wide */ + boolean used_read; /* 8bit wide */ + boolean used_write; /* 8bit wide */ + + uint16_t padd; /* aligned to 32bits after this: */ + + void *priv; /* private pointer, upper layer instance pointer */ +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + void *env; /* private pointer to environment layer internal context */ +#endif +}; + +/* struct to hold vring specific information */ +struct vring_alloc_info +{ + void *phy_addr; + uint32_t align; + uint16_t num_descs; + uint16_t pad; +}; + +struct vq_static_context +{ + struct virtqueue vq; +}; + +typedef void vq_callback(struct virtqueue *vq); +typedef void vq_notify(struct virtqueue *vq); + +#if (VQUEUE_DEBUG == true) +#define VQASSERT_BOOL(_vq, _exp, _msg) \ + do \ + { \ + if (!(_exp)) \ + { \ + env_print("%s: %s - "(_msg), __func__, (_vq)->vq_name); \ + while (1) \ + { \ + }; \ + } \ + } while (0) +#define VQASSERT(_vq, _exp, _msg) VQASSERT_BOOL(_vq, (_exp) != 0, _msg) + +#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, "invalid ring index") + +#define VQ_PARAM_CHK(condition, status_var, status_err) \ + if ((status_var == 0) && (condition)) \ + { \ + status_var = status_err; \ + } + +#define VQUEUE_BUSY(vq, dir) \ + if ((vq)->dir == false) \ + { \ + (vq)->dir = true; \ + } \ + else \ + { \ + VQASSERT(vq, (vq)->dir == false, "VirtQueue already in use") \ + } + +#define VQUEUE_IDLE(vq, dir) ((vq)->dir = false) + +#else + +#define KASSERT(cond, str) +#define VQASSERT(_vq, _exp, _msg) +#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) +#define VQ_PARAM_CHK(condition, status_var, status_err) +#define VQUEUE_BUSY(vq, dir) +#define VQUEUE_IDLE(vq, dir) + +#endif + +int32_t virtqueue_create(uint16_t id, + const char *name, + struct vring_alloc_info *ring, + void (*callback_fc)(struct virtqueue *vq), + void (*notify_fc)(struct 
virtqueue *vq), + struct virtqueue **v_queue); + +int32_t virtqueue_create_static(uint16_t id, + const char *name, + struct vring_alloc_info *ring, + void (*callback_fc)(struct virtqueue *vq), + void (*notify_fc)(struct virtqueue *vq), + struct virtqueue **v_queue, + struct vq_static_context *vq_ctxt); + +int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx); + +int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len); + +int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len); + +void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx); + +void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len); + +int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len); + +void virtqueue_disable_cb(struct virtqueue *vq); + +int32_t virtqueue_enable_cb(struct virtqueue *vq); + +void virtqueue_kick(struct virtqueue *vq); + +void virtqueue_free(struct virtqueue *vq); + +void virtqueue_free_static(struct virtqueue *vq); + +void virtqueue_dump(struct virtqueue *vq); + +void virtqueue_notification(struct virtqueue *vq); + +uint32_t virtqueue_get_desc_size(struct virtqueue *vq); + +uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx); + +void vq_ring_init(struct virtqueue *vq); + +#endif /* VIRTQUEUE_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c new file mode 100755 index 00000000..c1ae69cc --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_bm.c + * + * + * DESCRIPTION + * + * This file is Bare Metal Implementation of env layer for OpenAMP. 
+ * + * + **************************************************************************/ + +#include "rpmsg_compiler.h" +#include "rpmsg_env.h" +#include "rpmsg_platform.h" +#include "virtqueue.h" + +#include +#include + +static int32_t env_init_counter = 0; + +/* Max supported ISR counts */ +#define ISR_COUNT (12U) +/*! + * Structure to keep track of registered ISR's. + */ +struct isr_info +{ + void *data; +}; +static struct isr_info isr_table[ISR_COUNT]; + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +/*! + * env_wait_for_link_up + * + * Wait until the link_state parameter of the rpmsg_lite_instance is set. + * Busy loop implementation for BM. + * + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id) +{ + while (*link_state != 1U) + { + } +} + +/*! + * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * Empty implementation for BM. + * + */ +void env_tx_callback(uint32_t link_id) +{ +} + +/*! + * env_init + * + * Initializes OS/BM environment. + * + */ +int32_t env_init(void) +{ + // verify 'env_init_counter' + RL_ASSERT(env_init_counter >= 0); + if (env_init_counter < 0) + { + return -1; + } + env_init_counter++; + // multiple call of 'env_init' - return ok + if (1 < env_init_counter) + { + return 0; + } + // first call + (void)memset(isr_table, 0, sizeof(isr_table)); + return platform_init(); +} + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. + * + * @returns Execution status + */ +int32_t env_deinit(void) +{ + // verify 'env_init_counter' + RL_ASSERT(env_init_counter > 0); + if (env_init_counter <= 0) + { + return -1; + } + // counter on zero - call platform deinit + env_init_counter--; + // multiple call of 'env_deinit' - return ok + if (0 < env_init_counter) + { + return 0; + } + // last call + return platform_deinit(); +} + +/*! 
+ * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return (malloc(size)); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + if (ptr != ((void *)0)) + { + free(ptr); + } +} + +/*! + * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! + * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! + * + * env_mb - implementation + * + */ +void env_mb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *address) +{ + return platform_vatopa(address); +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(uint32_t address) +{ + return platform_patova(address); +} + +/*! + * env_create_mutex + * + * Creates a mutex with the given initial count. 
+ *
+ */
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+int32_t env_create_mutex(void **lock, int32_t count, void *context)
+#else
+int32_t env_create_mutex(void **lock, int32_t count)
+#endif
+{
+    /* Bare-metal port: no real mutex object is needed since the API is not
+     * shared with ISR context.  Make the handle point at its own storage so
+     * it is non-NULL, i.e. marked as initialized.
+     * NOTE(review): 'count' (and 'context') are ignored in this port. */
+    *lock = lock;
+    return 0;
+}
+
+/*!
+ * env_delete_mutex
+ *
+ * Deletes the given lock
+ *
+ */
+void env_delete_mutex(void *lock)
+{
+    /* Nothing to release: env_create_mutex() allocated nothing. */
+}
+
+/*!
+ * env_lock_mutex
+ *
+ * Tries to acquire the lock, if lock is not available then call to
+ * this function will suspend.
+ */
+void env_lock_mutex(void *lock)
+{
+    /* No mutex needed for RPMsg-Lite in BM environment,
+     * since the API is not shared with ISR context. */
+}
+
+/*!
+ * env_unlock_mutex
+ *
+ * Releases the given lock.
+ */
+void env_unlock_mutex(void *lock)
+{
+    /* No mutex needed for RPMsg-Lite in BM environment,
+     * since the API is not shared with ISR context. */
+}
+
+/*!
+ * env_sleep_msec
+ *
+ * Suspends the calling thread for the given time, in msecs.
+ * Delegates to platform_time_delay().
+ */
+void env_sleep_msec(uint32_t num_msec)
+{
+    platform_time_delay(num_msec);
+}
+
+/*!
+ * env_register_isr
+ *
+ * Registers interrupt handler data for the given interrupt vector.
+ *
+ * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
+ * @param data - interrupt handler data (virtqueue)
+ */
+void env_register_isr(uint32_t vector_id, void *data)
+{
+    RL_ASSERT(vector_id < ISR_COUNT);
+    if (vector_id < ISR_COUNT)
+    {
+        isr_table[vector_id].data = data;
+    }
+}
+
+/*!
+ * env_unregister_isr
+ *
+ * Unregisters interrupt handler data for the given interrupt vector.
+ *
+ * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
+ */
+void env_unregister_isr(uint32_t vector_id)
+{
+    RL_ASSERT(vector_id < ISR_COUNT);
+    if (vector_id < ISR_COUNT)
+    {
+        isr_table[vector_id].data = ((void *)0);
+    }
+}
+
+/*! 
+ * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_enable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_enable(vector_id); +} + +/*! + * env_disable_interrupt + * + * Disables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_disable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_disable(vector_id); +} + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. + * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + */ + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags) +{ + platform_map_mem_region(va, pa, size, flags); +} + +/*! + * env_disable_cache + * + * Disables system caches. + * + */ + +void env_disable_cache(void) +{ + platform_cache_all_flush_invalidate(); + platform_cache_disable(); +} + +/*========================================================= */ +/* Util data / functions for BM */ + +void env_isr(uint32_t vector) +{ + struct isr_info *info; + //RL_ASSERT(vector < ISR_COUNT); + if (vector < ISR_COUNT) + { + info = &isr_table[vector]; + virtqueue_notification((struct virtqueue *)info->data); + } +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_freertos.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_freertos.c new file mode 100755 index 00000000..10c2c7a3 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_freertos.c @@ -0,0 +1,789 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_freertos.c + * + * + * DESCRIPTION + * + * This file is FreeRTOS Implementation of env layer for OpenAMP. 
+ *
+ *
+ **************************************************************************/
+
+#include "rpmsg_compiler.h"
+#include "rpmsg_env.h"
+#include "FreeRTOS.h"
+#include "task.h"
+#include "semphr.h"
+#include "rpmsg_platform.h"
+#include "virtqueue.h"
+#include "event_groups.h"
+#include "rpmsg_lite.h"
+
+/* NOTE(review): header names were lost in transfer (bare "#include" lines);
+   restored to <stdlib.h>/<string.h> per the memset/memcpy/strcmp/strncpy
+   usage below — confirm against upstream rpmsg_env_freertos.c */
+#include <stdlib.h>
+#include <string.h>
+
+static int32_t env_init_counter = 0;
+static SemaphoreHandle_t env_sema = ((void *)0);
+static EventGroupHandle_t event_group = ((void *)0);
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+LOCK_STATIC_CONTEXT env_sem_static_context;
+StaticEventGroup_t event_group_static_context;
+#endif
+
+/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
+   if the inital count is 1, this function behaves as a mutex
+   if it is greater than 1, it acts as a "resource allocator" with
+   the maximum of 'count' resources available.
+   Currently, only the first use-case is applicable/applied in RPMsg-Lite.
+ */
+#define RL_ENV_MAX_MUTEX_COUNT (10)
+
+/* Max supported ISR counts */
+#define ISR_COUNT (32U)
+/*!
+ * Structure to keep track of registered ISR's.
+ */
+struct isr_info
+{
+    void *data;
+};
+static struct isr_info isr_table[ISR_COUNT];
+
+#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
+#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
+#endif
+
+/*!
+ * env_in_isr
+ *
+ * @returns - true, if currently in ISR
+ *
+ */
+static int32_t env_in_isr(void)
+{
+    return platform_in_isr();
+}
+
+/*!
+ * env_wait_for_link_up
+ *
+ * Wait until the link_state parameter of the rpmsg_lite_instance is set.
+ * Utilize events to avoid busy loop implementation.
+ *
+ */
+void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id)
+{
+    (void)xEventGroupClearBits(event_group, (EventBits_t)(1UL << link_id));
+    if (*link_state != 1U)
+    {
+        (void)xEventGroupWaitBits(event_group, (EventBits_t)(1UL << link_id), pdFALSE, pdTRUE, portMAX_DELAY);
+    }
+}
+
+/*!
+ * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * + */ +void env_tx_callback(uint32_t link_id) +{ + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + if (env_in_isr() != 0) + { + (void)xEventGroupSetBitsFromISR(event_group, (EventBits_t)(1UL << link_id), &xHigherPriorityTaskWoken); + portEND_SWITCHING_ISR(xHigherPriorityTaskWoken); + } + else + { + (void)xEventGroupSetBits(event_group, (EventBits_t)(1UL << link_id)); + } +} + +/*! + * env_init + * + * Initializes OS/BM environment. + * + */ +int32_t env_init(void) +{ + int32_t retval; + vTaskSuspendAll(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter >= 0); + if (env_init_counter < 0) + { + (void)xTaskResumeAll(); /* re-enable scheduler */ + return -1; + } + env_init_counter++; + /* multiple call of 'env_init' - return ok */ + if (env_init_counter == 1) + { + /* first call */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + env_sema = xSemaphoreCreateBinaryStatic(&env_sem_static_context); + event_group = xEventGroupCreateStatic(&event_group_static_context); +#else + env_sema = xSemaphoreCreateBinary(); + event_group = xEventGroupCreate(); +#endif +#if (configUSE_16_BIT_TICKS == 1) + (void)xEventGroupClearBits(event_group, 0xFFu); +#else + (void)xEventGroupClearBits(event_group, 0xFFFFFFu); +#endif + (void)memset(isr_table, 0, sizeof(isr_table)); + (void)xTaskResumeAll(); + retval = platform_init(); + (void)xSemaphoreGive(env_sema); + + return retval; + } + else + { + (void)xTaskResumeAll(); + /* Get the semaphore and then return it, + * this allows for platform_init() to block + * if needed and other tasks to wait for the + * blocking to be done. + * This is in ENV layer as this is ENV specific.*/ + if (pdTRUE == xSemaphoreTake(env_sema, portMAX_DELAY)) + { + (void)xSemaphoreGive(env_sema); + } + return 0; + } +} + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. 
+ * + * @returns - execution status + */ +int32_t env_deinit(void) +{ + int32_t retval; + + vTaskSuspendAll(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter > 0); + if (env_init_counter <= 0) + { + (void)xTaskResumeAll(); /* re-enable scheduler */ + return -1; + } + + /* counter on zero - call platform deinit */ + env_init_counter--; + /* multiple call of 'env_deinit' - return ok */ + if (env_init_counter <= 0) + { + /* last call */ + (void)memset(isr_table, 0, sizeof(isr_table)); + retval = platform_deinit(); + vEventGroupDelete(event_group); + event_group = ((void *)0); + vSemaphoreDelete(env_sema); + env_sema = ((void *)0); + (void)xTaskResumeAll(); + + return retval; + } + else + { + (void)xTaskResumeAll(); + return 0; + } +} + +/*! + * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return (pvPortMalloc(size)); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + if (ptr != ((void *)0)) + { + vPortFree(ptr); + } +} + +/*! + * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! 
+ * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! + * + * env_mb - implementation + * + */ +void env_mb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *address) +{ + return platform_vatopa(address); +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(uint32_t address) +{ + return platform_patova(address); +} + +/*! + * env_create_mutex + * + * Creates a mutex with the given initial count. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context) +#else +int32_t env_create_mutex(void **lock, int32_t count) +#endif +{ + if (count > RL_ENV_MAX_MUTEX_COUNT) + { + return -1; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + *lock = xSemaphoreCreateCountingStatic((UBaseType_t)RL_ENV_MAX_MUTEX_COUNT, (UBaseType_t)count, + (StaticSemaphore_t *)context); +#else + *lock = xSemaphoreCreateCounting((UBaseType_t)RL_ENV_MAX_MUTEX_COUNT, (UBaseType_t)count); +#endif + if (*lock != ((void *)0)) + { + return 0; + } + else + { + return -1; + } +} + +/*! + * env_delete_mutex + * + * Deletes the given lock + * + */ +void env_delete_mutex(void *lock) +{ + vSemaphoreDelete(lock); +} + +/*! + * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + */ +void env_lock_mutex(void *lock) +{ + SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock; + if (env_in_isr() == 0) + { + (void)xSemaphoreTake(xSemaphore, portMAX_DELAY); + } +} + +/*! 
+ * env_unlock_mutex + * + * Releases the given lock. + */ +void env_unlock_mutex(void *lock) +{ + SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock; + if (env_in_isr() == 0) + { + (void)xSemaphoreGive(xSemaphore); + } +} + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context) +{ + return env_create_mutex(lock, state, context); /* state=1 .. initially free */ +} +#else +int32_t env_create_sync_lock(void **lock, int32_t state) +{ + return env_create_mutex(lock, state); /* state=1 .. initially free */ +} +#endif + +/*! + * env_delete_sync_lock + * + * Deletes the given lock + * + */ +void env_delete_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_delete_mutex(lock); + } +} + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the lock, if lock is not available then call to + * this function waits for lock to become available. + */ +void env_acquire_sync_lock(void *lock) +{ + BaseType_t xTaskWokenByReceive = pdFALSE; + SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock; + if (env_in_isr() != 0) + { + (void)xSemaphoreTakeFromISR(xSemaphore, &xTaskWokenByReceive); + portEND_SWITCHING_ISR(xTaskWokenByReceive); + } + else + { + (void)xSemaphoreTake(xSemaphore, portMAX_DELAY); + } +} + +/*! + * env_release_sync_lock + * + * Releases the given lock. + */ +void env_release_sync_lock(void *lock) +{ + BaseType_t xTaskWokenByReceive = pdFALSE; + SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock; + if (env_in_isr() != 0) + { + (void)xSemaphoreGiveFromISR(xSemaphore, &xTaskWokenByReceive); + portEND_SWITCHING_ISR(xTaskWokenByReceive); + } + else + { + (void)xSemaphoreGive(xSemaphore); + } +} + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time , in msecs. 
+ */ +void env_sleep_msec(uint32_t num_msec) +{ + vTaskDelay(num_msec / portTICK_PERIOD_MS); +} + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + * @param data - interrupt handler data (virtqueue) + */ +void env_register_isr(uint32_t vector_id, void *data) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = data; + } +} + +/*! + * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + */ +void env_unregister_isr(uint32_t vector_id) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = ((void *)0); + } +} + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_enable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_enable(vector_id); +} + +/*! + * env_disable_interrupt + * + * Disables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_disable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_disable(vector_id); +} + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. + * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + */ + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags) +{ + platform_map_mem_region(va, pa, size, flags); +} + +/*! + * env_disable_cache + * + * Disables system caches. + * + */ + +void env_disable_cache(void) +{ + platform_cache_all_flush_invalidate(); + platform_cache_disable(); +} + +/*! + * + * env_get_timestamp + * + * Returns a 64 bit time stamp. 
+ * + * + */ +uint64_t env_get_timestamp(void) +{ + if (env_in_isr() != 0) + { + return (uint64_t)xTaskGetTickCountFromISR(); + } + else + { + return (uint64_t)xTaskGetTickCount(); + } +} + +/*========================================================= */ +/* Util data / functions */ + +void env_isr(uint32_t vector) +{ + struct isr_info *info; + RL_ASSERT(vector < ISR_COUNT); + if (vector < ISR_COUNT) + { + info = &isr_table[vector]; + virtqueue_notification((struct virtqueue *)info->data); + } +} + +/* + * env_create_queue + * + * Creates a message queue. + * + * @param queue - pointer to created queue + * @param length - maximum number of elements in the queue + * @param element_size - queue element size in bytes + * @param queue_static_storage - pointer to queue static storage buffer + * @param queue_static_context - pointer to queue static context + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_queue(void **queue, + int32_t length, + int32_t element_size, + uint8_t *queue_static_storage, + rpmsg_static_queue_ctxt *queue_static_context) +{ + *queue = + xQueueCreateStatic((UBaseType_t)length, (UBaseType_t)element_size, queue_static_storage, queue_static_context); +#else +int32_t env_create_queue(void **queue, int32_t length, int32_t element_size) +{ + *queue = xQueueCreate((UBaseType_t)length, (UBaseType_t)element_size); +#endif + if (*queue != ((void *)0)) + { + return 0; + } + else + { + return -1; + } +} + +/*! + * env_delete_queue + * + * Deletes the message queue. + * + * @param queue - queue to delete + */ + +void env_delete_queue(void *queue) +{ + vQueueDelete(queue); +} + +/*! + * env_put_queue + * + * Put an element in a queue. 
+ * + * @param queue - queue to put element in + * @param msg - pointer to the message to be put into the queue + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + if (env_in_isr() != 0) + { + if (xQueueSendFromISR(queue, msg, &xHigherPriorityTaskWoken) == pdPASS) + { + portEND_SWITCHING_ISR(xHigherPriorityTaskWoken); + return 1; + } + } + else + { + if (xQueueSend(queue, msg, ((portMAX_DELAY == timeout_ms) ? portMAX_DELAY : timeout_ms / portTICK_PERIOD_MS)) == + pdPASS) + { + return 1; + } + } + return 0; +} + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue - queue to get element from + * @param msg - pointer to a memory to save the message + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + if (env_in_isr() != 0) + { + if (xQueueReceiveFromISR(queue, msg, &xHigherPriorityTaskWoken) == pdPASS) + { + portEND_SWITCHING_ISR(xHigherPriorityTaskWoken); + return 1; + } + } + else + { + if (xQueueReceive(queue, msg, + ((portMAX_DELAY == timeout_ms) ? portMAX_DELAY : timeout_ms / portTICK_PERIOD_MS)) == pdPASS) + { + return 1; + } + } + return 0; +} + +/*! + * env_get_current_queue_size + * + * Get current queue size. 
+ * + * @param queue - queue pointer + * + * @return - Number of queued items in the queue + */ + +int32_t env_get_current_queue_size(void *queue) +{ + if (env_in_isr() != 0) + { + return ((int32_t)uxQueueMessagesWaitingFromISR(queue)); + } + else + { + return ((int32_t)uxQueueMessagesWaiting(queue)); + } +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.c new file mode 100755 index 00000000..f88365ef --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.c @@ -0,0 +1,776 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_qnx.c + * + * + * DESCRIPTION + * + * This file is QNX Implementation of env layer. + * + * + **************************************************************************/ + +#include "rpmsg_env.h" +#include "rpmsg_platform.h" +#include "virtqueue.h" + +#include +#include +#include +#include +#include "rpmsg_env_qnx.h" + +#if __PTR_BITS__ > 32 +#include +#include +#else +#include +#endif + +/* Max supported ISR counts */ +#define ISR_COUNT (32U) + +#if (!defined(RL_USE_ENVIRONMENT_CONTEXT)) || (RL_USE_ENVIRONMENT_CONTEXT != 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 1" +#endif + +/** + * Structure to keep track of registered ISR's. + */ +struct isr_info +{ + void *data; + volatile uint32_t enabled; +}; + +/** + * Structure to hold queue information + */ +typedef struct env_queue +{ + mqd_t mqd; + size_t msg_len; +} env_queue_t; + +/** + * Env. context structure + */ +typedef struct env_context +{ + void *platform_context; /* Pointer to platform context */ + uint32_t pa; /* Physical address of memory pool reserved for rpmsg */ + void *va; /* Virtual address of the memory pool */ + struct isr_info isr_table[ISR_COUNT]; /* Table with registered Virt. queue data */ +} env_context_t; + +/** + * Returns pointer to platform context. + * + * @param env_context Pointer to env. 
context + * + * @return Pointer to platform context + */ +void *env_get_platform_context(void *env_context) +{ + env_context_t *env = env_context; + return env->platform_context; +} + +/*! + * env_wait_for_link_up + * + * Wait until the link_state parameter of the rpmsg_lite_instance is set. + * Busy loop implementation for now, to be replaced by events. + * + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id) +{ + while (*link_state != 1U) + { + } +} + +/*! + * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * + */ +void env_tx_callback(uint32_t link_id) +{ +} + +/*! + * env_init + * + * Initializes OS/BM environment. + * + */ +int32_t env_init(void **env_context, void *env_init_data) +{ + rpmsg_env_init_t *init = env_init_data; + imx_rpmsg_env_cfg_t *user_cfg; + + if (init != ((void *)0)) + { + user_cfg = init->user_input; + env_context_t *ctx = env_allocate_memory(sizeof(env_context_t)); + if (ctx == ((void *)0)) + { + return -1; + } + /* Save virtual and phy address of mmaped memory region */ + ctx->pa = init->pa; + ctx->va = init->va; + /* Initialize platform, dereference user_input to get platform cfg address */ + if (platform_init(&ctx->platform_context, ctx, user_cfg ? user_cfg->platform_cfg : ((void *)0)) != 0) + { + env_free_memory(ctx); + return -1; + } + *env_context = ctx; + return 0; + } + return -1; +} + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. + * + * @returns - execution status + */ +int32_t env_deinit(void *env_context) +{ + env_context_t *ctx = env_context; + platform_deinit(ctx->platform_context); + env_free_memory(ctx); + return 0; +} + +/*! + * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return malloc(size); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + free(ptr); +} + +/*! 
+ * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! + * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! + * + * env_mb - implementation + * + */ +void env_mb(void) +{ + dsb(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + dsb(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + dsb(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *env, void *address) +{ +#if IMX_MMAP_VA_ON_PA + return ((uint32_t)address); +#else + /* This is faster then mem_offset64() */ + env_context_t *ctx = env; + uint64_t va = (uint64_t)address; + uint64_t va_start = (uint64_t)ctx->va; + uint64_t pa = ctx->pa + (va - va_start); + return pa; +#endif +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(void *env, uint32_t address) +{ +#if IMX_MMAP_VA_ON_PA + return ((void *)address); +#else + env_context_t *ctx = env; + uint64_t va_start = (uint64_t)ctx->va; + uint64_t va = (va_start + (address - ctx->pa)); + return (void *)va; +#endif +} + +/*! 
+ * env_create_mutex + * + * Creates a mutex with the given initial count. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context) +#else +int32_t env_create_mutex(void **lock, int32_t count) +#endif +{ + if (count > RL_ENV_MAX_MUTEX_COUNT) + { + return -1; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + *lock = context; +#else + *lock = env_allocate_memory(sizeof(pthread_mutex_t)); +#endif + if (*lock == ((void *)0)) + { + return -1; + } + if (EOK == pthread_mutex_init((pthread_mutex_t *)*lock, ((void *)0))) + { + return 0; + } + else + { +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(*lock); +#endif + return -1; + } +} + +/*! + * env_delete_mutex + * + * Deletes the given lock + * + */ +void env_delete_mutex(void *lock) +{ + pthread_mutex_destroy(lock); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(lock); +#endif +} + +/*! + * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + */ +void env_lock_mutex(void *lock) +{ + pthread_mutex_lock(lock); +} + +/*! + * env_unlock_mutex + * + * Releases the given lock. + */ +void env_unlock_mutex(void *lock) +{ + pthread_mutex_unlock(lock); +} + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context) +{ + return env_create_mutex(lock, state, context); /* state=1 .. initially free */ +} +#else +int32_t env_create_sync_lock(void **lock, int32_t state) +{ + return env_create_mutex(lock, state); /* state=1 .. initially free */ +} +#endif + +/*! 
+ * env_delete_sync_lock + * + * Deletes the given lock + * + */ +void env_delete_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_delete_mutex(lock); + } +} + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the lock, if lock is not available then call to + * this function waits for lock to become available. + */ +void env_acquire_sync_lock(void *lock) +{ + env_lock_mutex(lock); +} + +/*! + * env_release_sync_lock + * + * Releases the given lock. + */ +void env_release_sync_lock(void *lock) +{ + env_unlock_mutex(lock); +} + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time , in msecs. + */ +void env_sleep_msec(uint32_t num_msec) +{ + delay(num_msec); +} + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + * @param data - interrupt handler data (virtqueue) + */ +void env_register_isr(void *env, uint32_t vector_id, void *data) +{ + env_context_t *ctx = env; + + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + ctx->isr_table[vector_id].data = data; + } +} + +/*! + * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + */ +void env_unregister_isr(void *env, uint32_t vector_id) +{ + env_context_t *ctx = env; + + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + ctx->isr_table[vector_id].data = ((void *)0); + ctx->isr_table[vector_id].enabled = 0; + } +} + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ +void env_enable_interrupt(void *env, uint32_t vector_id) +{ + env_context_t *ctx = env; + + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + ctx->isr_table[vector_id].enabled = 1; + } +} + +/*! 
+ * env_disable_interrupt
+ *
+ * Disables the given interrupt
+ *
+ * @param vector_id - virtual interrupt vector number
+ */
+void env_disable_interrupt(void *env, uint32_t vector_id)
+{
+    env_context_t *ctx = env;
+
+    RL_ASSERT(vector_id < ISR_COUNT);
+    if (vector_id < ISR_COUNT)
+    {
+        ctx->isr_table[vector_id].enabled = 0;
+    }
+}
+
+/*!
+ * env_map_memory
+ *
+ * Enables memory mapping for given memory region.
+ *
+ * @param pa - physical address of memory
+ * @param va - logical address of memory
+ * @param size - memory size
+ * param flags - flags for cache/uncached and access type
+ */
+void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
+{
+    platform_map_mem_region(va, pa, size, flags);
+}
+
+/*!
+ * env_disable_cache
+ *
+ * Disables system caches.
+ *
+ */
+void env_disable_cache(void)
+{
+    platform_cache_all_flush_invalidate();
+    platform_cache_disable();
+}
+
+/*!
+ *
+ * env_get_timestamp
+ *
+ * Returns a 64 bit time stamp.
+ *
+ *
+ */
+uint64_t env_get_timestamp(void)
+{
+    fprintf(stderr, "%s unsupported\n", __FUNCTION__);
+    return 0;
+}
+
+/*========================================================= */
+/* Util data / functions */
+
+/**
+ * Called from receive thread
+ *
+ * @param env Pointer to env context
+ * @param vector Vector ID.
+ */
+void env_isr(void *env, uint32_t vector)
+{
+    struct isr_info *info;
+    env_context_t *ctx = env;
+
+    RL_ASSERT(vector < ISR_COUNT);
+    if (vector < ISR_COUNT)
+    {
+        info = &ctx->isr_table[vector];
+        if (info->enabled)
+        {
+            virtqueue_notification((struct virtqueue *)info->data);
+        }
+    }
+}
+
+/**
+ * Called by rpmsg to init an interrupt
+ *
+ * @param env Pointer to env context.
+ * @param vq_id Virt. queue ID.
+ * @param isr_data Pointer to interrupt data.
+ *
+ * @return Execution status.
+ */
+int32_t env_init_interrupt(void *env, int32_t vq_id, void *isr_data)
+{
+    env_register_isr(env, vq_id, isr_data);
+    return 0;
+}
+
+/**
+ * Called by rpmsg to deinit an interrupt.
+ * + * @param env Pointer to env context. + * @param vq_id Virt. queue ID. + * + * @return Execution status. + */ +int32_t env_deinit_interrupt(void *env, int32_t vq_id) +{ + env_unregister_isr(env, vq_id); + return 0; +} + +/** + * env_create_queue + * + * Creates a message queue. + * + * @param queue - pointer to created queue + * @param length - maximum number of elements in the queue + * @param element_size - queue element size in bytes + * @param queue_static_storage - pointer to queue static storage buffer + * @param queue_static_context - pointer to queue static context + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_queue(void **queue, + int32_t length, + int32_t element_size, + uint8_t *queue_static_storage, + rpmsg_static_queue_ctxt *queue_static_context) +#else +int32_t env_create_queue(void **queue, int32_t length, int32_t element_size) +#endif +{ + char name[100]; + struct mq_attr mqstat; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + env_queue_t *q = (env_queue_t *)queue_static_context; +#else + env_queue_t *q = env_allocate_memory(sizeof(env_queue_t)); +#endif + if (q == ((void *)0)) + { + return -1; + } + /* Creates a unique queue in /dev/mq/PID_virtaddr_length */ + sprintf(name, "/%u_0x%lx_%u", getpid(), (uint64_t)q, length); + mqstat.mq_maxmsg = length; + mqstat.mq_msgsize = element_size; + mqstat.mq_flags = 0; + mqstat.mq_curmsgs = 0; + mqstat.mq_recvwait = 0; + mqstat.mq_sendwait = 0; + q->msg_len = element_size; + q->mqd = (name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR, &mqstat); + if (q->mqd == -1) + { +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(q); +#endif + fprintf(stderr, "mq_open failed: %s\n", strerror(errno)); + return -1; + } + /* Return queue */ + *queue = q; + return 0; +} + +/*! + * env_delete_queue + * + * Deletes the message queue. 
+ * + * @param queue - queue to delete + */ +void env_delete_queue(void *queue) +{ + env_queue_t *q = queue; + + mq_close(q->mqd); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(queue); +#endif +} + +/*! + * env_put_queue + * + * Put an element in a queue. + * + * @param queue - queue to put element in + * @param msg - pointer to the message to be put into the queue + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + env_queue_t *q = queue; + + if (mq_send(q->mqd, (const char *)msg, q->msg_len, 0)) + { + fprintf(stderr, "mq_send failed: %s\n", strerror(errno)); + return 0; + } + return 1; +} + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue - queue to get element from + * @param msg - pointer to a memory to save the message + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + env_queue_t *q = queue; + if (mq_receive(q->mqd, msg, q->msg_len, ((void *)0)) == -1) + { + fprintf(stderr, "mq_receive failed: %s\n", strerror(errno)); + return 0; + } + return 1; +} + +/*! + * env_get_current_queue_size + * + * Get current queue size. 
+ * + * @param queue - queue pointer + * + * @return - Number of queued items in the queue + */ +int32_t env_get_current_queue_size(void *queue) +{ + struct mq_attr mqstat; + env_queue_t *q = queue; + if (mq_getattr(q->mqd, &mqstat) != -1) + { + return mqstat.mq_curmsgs; + } + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.h b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.h new file mode 100755 index 00000000..ee56bc62 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_qnx.h @@ -0,0 +1,56 @@ +/* + * Copyright 2016-2019 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this list + * of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, this + * list of conditions and the following disclaimer in the documentation and/or + * other materials provided with the distribution. + * + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_qnx.h + * + * + * DESCRIPTION + * + * This file is QNX header file of env layer. + * + * + **************************************************************************/ +#ifndef RPMSG_ENV_QNX_H_ +#define RPMSG_ENV_QNX_H_ + +#include + +typedef struct rpmsg_env_init +{ + void *user_input; /* Pointer to user init cfg */ + uint32_t pa; /* Physical address of memory pool reserved for rpmsg */ + void *va; /* Virtual address of the memory pool */ +} rpmsg_env_init_t; + +#endif /* RPMSG_ENV_QNX_H_ */ diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_threadx.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_threadx.c new file mode 100755 index 00000000..a008337f --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_threadx.c @@ -0,0 +1,724 @@ +/* + * Copyright 2020-2022 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_threadx.c + * + * + * DESCRIPTION + * + * This file is ThreadX Implementation of env layer for OpenAMP. 
+ * + * + **************************************************************************/ +#include "rpmsg_compiler.h" +#include "rpmsg_env.h" +#include "tx_api.h" +#include "tx_event_flags.h" +#include "rpmsg_platform.h" +#include "fsl_common.h" +#include "fsl_component_mem_manager.h" +#include +#include +#include "virtqueue.h" + +static int32_t env_init_counter = 0; +static TX_SEMAPHORE env_sema; +static TX_EVENT_FLAGS_GROUP event_group; + +/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count' + if the inital count is 1, this function behaves as a mutex + if it is greater than 1, it acts as a "resource allocator" with + the maximum of 'count' resources available. + Currently, only the first use-case is applicable/applied in RPMsg-Lite. + */ +#define RL_ENV_MAX_MUTEX_COUNT (10) + +/* Max supported ISR counts */ +#define ISR_COUNT (32U) +/*! + * Structure to keep track of registered ISR's. + */ +struct isr_info +{ + void *data; +}; +static struct isr_info isr_table[ISR_COUNT]; + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +/*! + * env_in_isr + * + * @returns - true, if currently in ISR + * + */ +static int32_t env_in_isr(void) +{ + return platform_in_isr(); +} + +/*! + * env_wait_for_link_up + * + * Wait until the link_state parameter of the rpmsg_lite_instance is set. + * Utilize events to avoid busy loop implementation. + * + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id) +{ + if (*link_state != 1U) + { + tx_event_flags_get(&event_group, (1UL << link_id), TX_AND, NULL, TX_WAIT_FOREVER); + } +} + +/*! + * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * + */ +void env_tx_callback(uint32_t link_id) +{ + tx_event_flags_set(&event_group, (1UL << link_id), TX_OR); +} + +/*! + * env_init + * + * Initializes OS/ThreadX environment. 
+ * + */ +int32_t env_init(void) +{ + int32_t retval; + uint32_t regPrimask = DisableGlobalIRQ(); /* stop scheduler */ + + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter >= 0); + if (env_init_counter < 0) + { + EnableGlobalIRQ(regPrimask); /* re-enable scheduler */ + return -1; + } + env_init_counter++; + /* multiple call of 'env_init' - return ok */ + if (env_init_counter == 1) + { + /* first call */ + if (TX_SUCCESS != _tx_semaphore_create((TX_SEMAPHORE *)&env_sema, NULL, 0)) + { + EnableGlobalIRQ(regPrimask); + return -1; + } + (void)tx_event_flags_create(&event_group, NULL); + (void)memset(isr_table, 0, sizeof(isr_table)); + EnableGlobalIRQ(regPrimask); + retval = platform_init(); + tx_semaphore_put((TX_SEMAPHORE *)&env_sema); + + return retval; + } + else + { + EnableGlobalIRQ(regPrimask); + /* Get the semaphore and then return it, + * this allows for platform_init() to block + * if needed and other tasks to wait for the + * blocking to be done. + * This is in ENV layer as this is ENV specific.*/ + if (TX_SUCCESS == tx_semaphore_get((TX_SEMAPHORE *)&env_sema, TX_WAIT_FOREVER)) + { + tx_semaphore_put((TX_SEMAPHORE *)&env_sema); + } + return 0; + } +} + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. 
+ * + * @returns - execution status + */ +int32_t env_deinit(void) +{ + int32_t retval; + + uint32_t regPrimask = DisableGlobalIRQ(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter > 0); + if (env_init_counter <= 0) + { + EnableGlobalIRQ(regPrimask); + return -1; + } + + /* counter on zero - call platform deinit */ + env_init_counter--; + /* multiple call of 'env_deinit' - return ok */ + if (env_init_counter <= 0) + { + /* last call */ + (void)memset(isr_table, 0, sizeof(isr_table)); + retval = platform_deinit(); + (void)tx_event_flags_delete(&event_group); + (void)memset(&event_group, 0, sizeof(event_group)); + (void)_tx_semaphore_delete((TX_SEMAPHORE *)&env_sema); + (void)memset(&env_sema, 0, sizeof(env_sema)); + EnableGlobalIRQ(regPrimask); + return retval; + } + else + { + EnableGlobalIRQ(regPrimask); + return 0; + } +} + +/*! + * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return (MEM_BufferAlloc(size)); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + if (ptr != ((void *)0)) + { + MEM_BufferFree(ptr); + } +} + +/*! + * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! 
+ * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! + * + * env_mb - implementation + * + */ +void env_mb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *address) +{ + return platform_vatopa(address); +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(uint32_t address) +{ + return platform_patova(address); +} + +/*! + * env_create_mutex + * + * Creates a mutex with the given initial count. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context) +#else +int32_t env_create_mutex(void **lock, int32_t count) +#endif +{ + TX_SEMAPHORE *semaphore_ptr; + + if (count > RL_ENV_MAX_MUTEX_COUNT) + { + return -1; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + semaphore_ptr = (TX_SEMAPHORE *)context; +#else + semaphore_ptr = (TX_SEMAPHORE *)env_allocate_memory(sizeof(TX_SEMAPHORE)); +#endif + if (semaphore_ptr == ((void *)0)) + { + return -1; + } + + if (TX_SUCCESS == _tx_semaphore_create((TX_SEMAPHORE *)semaphore_ptr, NULL, count)) + { + *lock = (void *)semaphore_ptr; + return 0; + } + else + { +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(semaphore_ptr); +#endif + return -1; + } +} + +/*! + * env_delete_mutex + * + * Deletes the given lock + * + */ +void env_delete_mutex(void *lock) +{ + (void)_tx_semaphore_delete((TX_SEMAPHORE *)lock); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(lock); +#endif +} + +/*! 
+ * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + */ +void env_lock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + (void)tx_semaphore_get((TX_SEMAPHORE *)lock, TX_WAIT_FOREVER); + } +} + +/*! + * env_unlock_mutex + * + * Releases the given lock. + */ +void env_unlock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + tx_semaphore_put((TX_SEMAPHORE *)lock); + } +} + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context) +{ + return env_create_mutex(lock, state, context); /* state=1 .. initially free */ +} +#else +int32_t env_create_sync_lock(void **lock, int32_t state) +{ + return env_create_mutex(lock, state); /* state=1 .. initially free */ +} +#endif + +/*! + * env_delete_sync_lock + * + * Deletes the given lock + * + */ +void env_delete_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_delete_mutex(lock); + } +} + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the lock, if lock is not available then call to + * this function waits for lock to become available. + */ +void env_acquire_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_lock_mutex(lock); + } +} + +/*! + * env_release_sync_lock + * + * Releases the given lock. + */ +void env_release_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_unlock_mutex(lock); + } +} + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time , in msecs. + */ +void env_sleep_msec(uint32_t num_msec) +{ + tx_thread_sleep(num_msec); +} + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. 
+ * + * @param vector_id - virtual interrupt vector number + * @param data - interrupt handler data (virtqueue) + */ +void env_register_isr(uint32_t vector_id, void *data) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = data; + } +} + +/*! + * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + */ +void env_unregister_isr(uint32_t vector_id) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = ((void *)0); + } +} + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_enable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_enable(vector_id); +} + +/*! + * env_disable_interrupt + * + * Disables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_disable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_disable(vector_id); +} + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. + * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + */ + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags) +{ + platform_map_mem_region(va, pa, size, flags); +} + +/*! + * env_disable_cache + * + * Disables system caches. 
+ *
+ */
+
+void env_disable_cache(void)
+{
+    platform_cache_all_flush_invalidate();
+    platform_cache_disable();
+}
+
+/*========================================================= */
+/* Util data / functions */
+
+void env_isr(uint32_t vector)
+{
+    struct isr_info *info;
+    RL_ASSERT(vector < ISR_COUNT);
+    if (vector < ISR_COUNT)
+    {
+        info = &isr_table[vector];
+        virtqueue_notification((struct virtqueue *)info->data);
+    }
+}
+
+/*
+ * env_create_queue
+ *
+ * Creates a message queue.
+ *
+ * @param queue - pointer to created queue
+ * @param length - maximum number of elements in the queue
+ * @param element_size - queue element size in bytes
+ * @param queue_static_storage - pointer to queue static storage buffer
+ * @param queue_static_context - pointer to queue static context
+ *
+ * @return - status of function execution
+ */
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+int32_t env_create_queue(void **queue,
+                         int32_t length,
+                         int32_t element_size,
+                         uint8_t *queue_static_storage,
+                         rpmsg_static_queue_ctxt *queue_static_context)
+#else
+int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
+#endif
+{
+    TX_QUEUE *queue_ptr = ((void *)0);
+    char *msgq_buffer_ptr = ((void *)0);
+
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+    queue_ptr = (TX_QUEUE *)queue_static_context;
+    msgq_buffer_ptr = (char *)queue_static_storage;
+#else
+    queue_ptr = (TX_QUEUE *)env_allocate_memory(sizeof(TX_QUEUE));
+    msgq_buffer_ptr = (char *)env_allocate_memory(length * element_size);
+#endif
+    if ((queue_ptr == ((void *)0)) || (msgq_buffer_ptr == ((void *)0)))
+    {
+#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
+        /* Release whichever of the two allocations succeeded (env_free_memory
+           ignores NULL) to avoid leaking on partial allocation failure. */
+        env_free_memory(queue_ptr);
+        env_free_memory(msgq_buffer_ptr);
+#endif
+        return -1;
+    }
+
+    if (TX_SUCCESS ==
+        _tx_queue_create((TX_QUEUE *)queue_ptr, NULL, element_size, (VOID *)msgq_buffer_ptr, (length * element_size)))
+    {
+        *queue = (void *)queue_ptr;
+        return 0;
+    }
+    else
+    {
+#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
+        env_free_memory(queue_ptr);
+        env_free_memory(msgq_buffer_ptr);
+#endif
+
return -1; + } +} + +/*! + * env_delete_queue + * + * Deletes the message queue. + * + * @param queue - queue to delete + */ + +void env_delete_queue(void *queue) +{ + tx_queue_delete(queue); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(queue); +#endif +} + +/*! + * env_put_queue + * + * Put an element in a queue. + * + * @param queue - queue to put element in + * @param msg - pointer to the message to be put into the queue + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (TX_SUCCESS == tx_queue_send((TX_QUEUE *)queue, msg, timeout_ms)) + { + return 0; + } + else + { + return -1; + } +} + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue - queue to get element from + * @param msg - pointer to a memory to save the message + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (TX_SUCCESS == tx_queue_receive((TX_QUEUE *)queue, msg, timeout_ms)) + { + return 0; + } + else + { + return -1; + } +} + +/*! + * env_get_current_queue_size + * + * Get current queue size. 
+ * + * @param queue - queue pointer + * + * @return - Number of queued items in the queue + */ + +int32_t env_get_current_queue_size(void *queue) +{ + int32_t enqueued; + ULONG available_storage; + TX_THREAD *first_suspended; + ULONG suspended_count; + TX_QUEUE *next_queue; + if (TX_SUCCESS == tx_queue_info_get((TX_QUEUE *)queue, NULL, (ULONG *)&enqueued, &available_storage, + &first_suspended, &suspended_count, &next_queue)) + { + return 0; + } + else + { + return -1; + } +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_xos.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_xos.c new file mode 100755 index 00000000..9c738ef2 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_xos.c @@ -0,0 +1,747 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2022 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/************************************************************************** + * FILE NAME + * + * rpmsg_env_xos.c + * + * + * DESCRIPTION + * + * This file is XOS Implementation of env layer for RPMsg_Lite. + * + * + **************************************************************************/ + +#include "rpmsg_compiler.h" +#include "rpmsg_env.h" +#include "rpmsg_lite.h" +#include +#include "rpmsg_platform.h" +#include "virtqueue.h" + +#include +#include + +static int32_t env_init_counter = 0; +static struct XosSem env_sema = {0}; +static struct XosEvent env_event = {0}; + +/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count' + if the inital count is 1, this function behaves as a mutex + if it is greater than 1, it acts as a "resource allocator" with + the maximum of 'count' resources available. + Currently, only the first use-case is applicable/applied in RPMsg-Lite. + */ +#define RL_ENV_MAX_MUTEX_COUNT (10) + +/* Max supported ISR counts */ +#define ISR_COUNT (32U) +/*! + * Structure to keep track of registered ISR's. + */ +struct isr_info +{ + void *data; +}; +static struct isr_info isr_table[ISR_COUNT]; + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +/*! 
+ * env_in_isr + * + * @returns - true, if currently in ISR + * + */ +static int32_t env_in_isr(void) +{ + return platform_in_isr(); +} + +/*! + * env_wait_for_link_up + * + * Wait until the link_state parameter of the rpmsg_lite_instance is set. + * Utilize events to avoid busy loop implementation. + * + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id) +{ + if (*link_state != 1U) + { + xos_event_wait_all(&env_event, (1UL << link_id)); + } +} + +/*! + * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * + */ +void env_tx_callback(uint32_t link_id) +{ + xos_event_set(&env_event, (1UL << link_id)); +} + +/*! + * env_init + * + * Initializes XOS environment. + * + */ +int32_t env_init(void) +{ + int32_t retval; + uint32_t regPrimask = xos_disable_interrupts(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter >= 0); + if (env_init_counter < 0) + { + xos_restore_interrupts(regPrimask); /* re-enable scheduler */ + return -1; + } + env_init_counter++; + /* multiple call of 'env_init' - return ok */ + if (env_init_counter == 1) + { + /* first call */ + (void)xos_sem_create(&env_sema, XOS_SEM_WAIT_PRIORITY, 1); + (void)xos_event_create(&env_event, 0xFFFFFFFFu, XOS_EVENT_AUTO_CLEAR); + (void)memset(isr_table, 0, sizeof(isr_table)); + xos_restore_interrupts(regPrimask); + retval = platform_init(); + (void)xos_sem_put(&env_sema); + + return retval; + } + else + { + xos_restore_interrupts(regPrimask); + /* Get the semaphore and then return it, + * this allows for platform_init() to block + * if needed and other tasks to wait for the + * blocking to be done. + * This is in ENV layer as this is ENV specific.*/ + if (XOS_OK == xos_sem_get(&env_sema)) + { + (void)xos_sem_put(&env_sema); + } + return 0; + } +} + +/*! + * env_deinit + * + * Uninitializes XOS environment. 
+ * + * @returns - execution status + */ +int32_t env_deinit(void) +{ + int32_t retval; + + uint32_t regPrimask = xos_disable_interrupts(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter > 0); + if (env_init_counter <= 0) + { + xos_restore_interrupts(regPrimask); /* re-enable scheduler */ + return -1; + } + + /* counter on zero - call platform deinit */ + env_init_counter--; + /* multiple call of 'env_deinit' - return ok */ + if (env_init_counter <= 0) + { + /* last call */ + (void)memset(isr_table, 0, sizeof(isr_table)); + retval = platform_deinit(); + (void)xos_event_delete(&env_event); + (void)xos_sem_delete(&env_sema); + xos_restore_interrupts(regPrimask); + + return retval; + } + else + { + xos_restore_interrupts(regPrimask); + return 0; + } +} + +/*! + * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return (malloc(size)); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + if (ptr != ((void *)0)) + { + free(ptr); + } +} + +/*! + * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! 
+ * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! + * + * env_mb - implementation + * + */ +void env_mb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *address) +{ + return platform_vatopa(address); +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(uint32_t address) +{ + return platform_patova(address); +} + +/*! + * env_create_mutex + * + * Creates a mutex with the given initial count. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context) +#else +int32_t env_create_mutex(void **lock, int32_t count) +#endif +{ + struct XosSem *semaphore_ptr; + + if (count > RL_ENV_MAX_MUTEX_COUNT) + { + return -1; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + semaphore_ptr = (struct XosSem *)context; +#else + semaphore_ptr = (struct XosSem *)env_allocate_memory(sizeof(struct XosSem)); +#endif + if (semaphore_ptr == ((void *)0)) + { + return -1; + } + + if (XOS_OK == xos_sem_create(semaphore_ptr, XOS_SEM_WAIT_PRIORITY, count)) + { + *lock = (void *)semaphore_ptr; + return 0; + } + else + { +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(semaphore_ptr); +#endif + return -1; + } +} + +/*! + * env_delete_mutex + * + * Deletes the given lock + * + */ +void env_delete_mutex(void *lock) +{ + xos_sem_delete(lock); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(lock); +#endif +} + +/*! 
+ * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + */ +void env_lock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + (void)xos_sem_get((struct XosSem *)lock); + } +} + +/*! + * env_unlock_mutex + * + * Releases the given lock. + */ +void env_unlock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + (void)xos_sem_put((struct XosSem *)lock); + } +} + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context) +{ + return env_create_mutex(lock, state, context); /* state=1 .. initially free */ +} +#else +int32_t env_create_sync_lock(void **lock, int32_t state) +{ + return env_create_mutex(lock, state); /* state=1 .. initially free */ +} +#endif + +/*! + * env_delete_sync_lock + * + * Deletes the given lock + * + */ +void env_delete_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_delete_mutex(lock); + } +} + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the lock, if lock is not available then call to + * this function waits for lock to become available. + */ +void env_acquire_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_lock_mutex(lock); + } +} + +/*! + * env_release_sync_lock + * + * Releases the given lock. + */ +void env_release_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_unlock_mutex(lock); + } +} + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time , in msecs. + */ +void env_sleep_msec(uint32_t num_msec) +{ + (void)xos_thread_sleep_msec(num_msec); +} + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. 
+ * + * @param vector_id - virtual interrupt vector number + * @param data - interrupt handler data (virtqueue) + */ +void env_register_isr(uint32_t vector_id, void *data) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = data; + } +} + +/*! + * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + */ +void env_unregister_isr(uint32_t vector_id) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = ((void *)0); + } +} + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_enable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_enable(vector_id); +} + +/*! + * env_disable_interrupt + * + * Disables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_disable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_disable(vector_id); +} + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. + * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + */ + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags) +{ + platform_map_mem_region(va, pa, size, flags); +} + +/*! + * env_disable_cache + * + * Disables system caches. 
+ * + */ + +void env_disable_cache(void) +{ + platform_cache_all_flush_invalidate(); + platform_cache_disable(); +} + +/*========================================================= */ +/* Util data / functions */ + +void env_isr(uint32_t vector) +{ + struct isr_info *info; + RL_ASSERT(vector < ISR_COUNT); + if (vector < ISR_COUNT) + { + info = &isr_table[vector]; + virtqueue_notification((struct virtqueue *)info->data); + } +} + +/* + * env_create_queue + * + * Creates a message queue. + * + * @param queue - pointer to created queue + * @param length - maximum number of elements in the queue + * @param element_size - queue element size in bytes + * @param queue_static_storage - pointer to queue static storage buffer + * @param queue_static_context - pointer to queue static context + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_queue(void **queue, + int32_t length, + int32_t element_size, + uint8_t *queue_static_storage, + rpmsg_static_queue_ctxt *queue_static_context) +#else +int32_t env_create_queue(void **queue, int32_t length, int32_t element_size) +#endif +{ + char *queue_ptr = ((void *)0); + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + queue_ptr = (char *)queue_static_storage; +#else + queue_ptr = (char *)env_allocate_memory(XOS_MSGQ_SIZE(length, element_size)); +#endif + if (queue_ptr != ((void *)0)) + { + if (XOS_OK == + xos_msgq_create((XosMsgQueue *)queue_ptr, (uint16_t)length, (uint32_t)element_size, XOS_MSGQ_WAIT_PRIORITY)) + { + *queue = (void *)queue_ptr; + return 0; + } + else + { +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(queue_ptr); +#endif + return -1; + } + } + return -1; +} + +/*! + * env_delete_queue + * + * Deletes the message queue. 
+ * + * @param queue - queue to delete + */ + +void env_delete_queue(void *queue) +{ + xos_msgq_delete(queue); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(queue); +#endif +} + +/*! + * env_put_queue + * + * Put an element in a queue. + * + * @param queue - queue to put element in + * @param msg - pointer to the message to be put into the queue + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (RL_BLOCK == timeout_ms) + { + /* If no space is available, this function will block if called from a thread, but will + return immediately if called from an interrupt handler. */ + if (XOS_OK == xos_msgq_put(queue, msg)) + { + return 1; + } + } + else + { + /* If no space is available, this function will block if called from a thread, but will + return immediately if called from an interrupt handler. */ + if (XOS_OK == xos_msgq_put_timeout(queue, msg, xos_msecs_to_cycles(timeout_ms))) + { + return 1; + } + } + return 0; +} + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue - queue to get element from + * @param msg - pointer to a memory to save the message + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (RL_BLOCK == timeout_ms) + { + /* If no message is available, this function will block if called from a thread, but will return + immediately if called from an interrupt handler. */ + if (XOS_OK == xos_msgq_get(queue, msg)) + { + return 1; + } + } + else + { + /* If no message is available, this function will block if called from a thread, but will return + immediately if called from an interrupt handler. The thread will be unblocked when a message + arrives in the queue or the timeout expires. 
*/ + if (XOS_OK == xos_msgq_get_timeout(queue, msg, xos_msecs_to_cycles(timeout_ms))) + { + return 1; + } + } + return 0; +} + +/*! + * env_get_current_queue_size + * + * Get current queue size. + * + * @param queue - queue pointer + * + * @return - Number of queued items in the queue + */ + +int32_t env_get_current_queue_size(void *queue) +{ + return ((int32_t)xos_msgq_empty(queue)); +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_zephyr.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_zephyr.c new file mode 100755 index 00000000..451f8b68 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_zephyr.c @@ -0,0 +1,715 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**************************************************************************
+ * FILE NAME
+ *
+ * rpmsg_env_zephyr.c
+ *
+ *
+ * DESCRIPTION
+ *
+ * This file is Zephyr RTOS Implementation of env layer for OpenAMP.
+ *
+ *
+ **************************************************************************/
+
+#include "rpmsg_compiler.h"
+#include "rpmsg_env.h"
+#include
+#include "rpmsg_platform.h"
+#include "virtqueue.h"
+
+#include
+#include
+
+#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
+#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
+#endif
+
+/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
+ if the initial count is 1, this function behaves as a mutex
+ if it is greater than 1, it acts as a "resource allocator" with
+ the maximum of 'count' resources available.
+ Currently, only the first use-case is applicable/applied in RPMsg-Lite.
+ */
+#define RL_ENV_MAX_MUTEX_COUNT (10)
+
+static int32_t env_init_counter = 0;
+static struct k_sem env_sema = {0};
+static struct k_event env_event = {0};
+
+/* Max supported ISR counts */
+#define ISR_COUNT (32U)
+/*!
+ * Structure to keep track of registered ISR's.
+ */
+struct isr_info
+{
+ void *data;
+};
+static struct isr_info isr_table[ISR_COUNT];
+
+/*!
+ * env_in_isr
+ *
+ * @returns - true, if currently in ISR
+ *
+ */
+static int32_t env_in_isr(void)
+{
+ return platform_in_isr();
+}
+
+/*!
+ * env_wait_for_link_up + * + * Wait until the link_state parameter of the rpmsg_lite_instance is set. + * Utilize events to avoid busy loop implementation. + * + */ +void env_wait_for_link_up(volatile uint32_t *link_state, uint32_t link_id) +{ + if (*link_state != 1U) + { + k_event_wait_all(&env_event, (1UL << link_id), false, K_FOREVER); + } +} + +/*! + * env_tx_callback + * + * Set event to notify task waiting in env_wait_for_link_up(). + * + */ +void env_tx_callback(uint32_t link_id) +{ + k_event_post(&env_event, (1UL << link_id)); +} + +/*! + * env_init + * + * Initializes OS/BM environment. + * + */ +int32_t env_init(void) +{ + int32_t retval; + k_sched_lock(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter >= 0); + if (env_init_counter < 0) + { + k_sched_unlock(); /* re-enable scheduler */ + return -1; + } + env_init_counter++; + /* multiple call of 'env_init' - return ok */ + if (env_init_counter == 1) + { + /* first call */ + k_sem_init(&env_sema, 0, 1); + k_event_init(&env_event); + (void)memset(isr_table, 0, sizeof(isr_table)); + k_sched_unlock(); + retval = platform_init(); + k_sem_give(&env_sema); + + return retval; + } + else + { + k_sched_unlock(); + /* Get the semaphore and then return it, + * this allows for platform_init() to block + * if needed and other tasks to wait for the + * blocking to be done. + * This is in ENV layer as this is ENV specific.*/ + k_sem_take(&env_sema, K_FOREVER); + k_sem_give(&env_sema); + return 0; + } +} + +/*! + * env_deinit + * + * Uninitializes OS/BM environment. 
+ * + * @returns - execution status + */ +int32_t env_deinit(void) +{ + int32_t retval; + + k_sched_lock(); /* stop scheduler */ + /* verify 'env_init_counter' */ + RL_ASSERT(env_init_counter > 0); + if (env_init_counter <= 0) + { + k_sched_unlock(); /* re-enable scheduler */ + return -1; + } + + /* counter on zero - call platform deinit */ + env_init_counter--; + /* multiple call of 'env_deinit' - return ok */ + if (env_init_counter <= 0) + { + /* last call */ + (void)memset(isr_table, 0, sizeof(isr_table)); + retval = platform_deinit(); + k_sem_reset(&env_sema); + k_sched_unlock(); + + return retval; + } + else + { + k_sched_unlock(); + return 0; + } +} + +/*! + * env_allocate_memory - implementation + * + * @param size + */ +void *env_allocate_memory(uint32_t size) +{ + return (k_malloc(size)); +} + +/*! + * env_free_memory - implementation + * + * @param ptr + */ +void env_free_memory(void *ptr) +{ + if (ptr != ((void *)0)) + { + k_free(ptr); + } +} + +/*! + * + * env_memset - implementation + * + * @param ptr + * @param value + * @param size + */ +void env_memset(void *ptr, int32_t value, uint32_t size) +{ + (void)memset(ptr, value, size); +} + +/*! + * + * env_memcpy - implementation + * + * @param dst + * @param src + * @param len + */ +void env_memcpy(void *dst, void const *src, uint32_t len) +{ + (void)memcpy(dst, src, len); +} + +/*! + * + * env_strcmp - implementation + * + * @param dst + * @param src + */ + +int32_t env_strcmp(const char *dst, const char *src) +{ + return (strcmp(dst, src)); +} + +/*! + * + * env_strncpy - implementation + * + * @param dest + * @param src + * @param len + */ +void env_strncpy(char *dest, const char *src, uint32_t len) +{ + (void)strncpy(dest, src, len); +} + +/*! + * + * env_strncmp - implementation + * + * @param dest + * @param src + * @param len + */ +int32_t env_strncmp(char *dest, const char *src, uint32_t len) +{ + return (strncmp(dest, src, len)); +} + +/*! 
+ * + * env_mb - implementation + * + */ +void env_mb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_rmb - implementation + */ +void env_rmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_wmb - implementation + */ +void env_wmb(void) +{ + MEM_BARRIER(); +} + +/*! + * env_map_vatopa - implementation + * + * @param address + */ +uint32_t env_map_vatopa(void *address) +{ + return platform_vatopa(address); +} + +/*! + * env_map_patova - implementation + * + * @param address + */ +void *env_map_patova(uint32_t address) +{ + return platform_patova(address); +} + +/*! + * env_create_mutex + * + * Creates a mutex with the given initial count. + * + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_mutex(void **lock, int32_t count, void *context) +#else +int32_t env_create_mutex(void **lock, int32_t count) +#endif +{ + struct k_sem *semaphore_ptr; + + if (count > RL_ENV_MAX_MUTEX_COUNT) + { + return -1; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + semaphore_ptr = (struct k_sem *)context; +#else + semaphore_ptr = (struct k_sem *)env_allocate_memory(sizeof(struct k_sem)); +#endif + if (semaphore_ptr == ((void *)0)) + { + return -1; + } + + k_sem_init(semaphore_ptr, count, RL_ENV_MAX_MUTEX_COUNT); + /* Becasue k_sem_init() does not return any status, we do not know if all is OK or not. + If something would not be OK dynamically allocated memory has to be freed here. */ + + *lock = (void *)semaphore_ptr; + return 0; +} + +/*! + * env_delete_mutex + * + * Deletes the given lock + * + */ +void env_delete_mutex(void *lock) +{ + k_sem_reset(lock); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(lock); +#endif +} + +/*! + * env_lock_mutex + * + * Tries to acquire the lock, if lock is not available then call to + * this function will suspend. + */ +void env_lock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + k_sem_take((struct k_sem *)lock, K_FOREVER); + } +} + +/*! 
+ * env_unlock_mutex + * + * Releases the given lock. + */ +void env_unlock_mutex(void *lock) +{ + if (env_in_isr() == 0) + { + k_sem_give((struct k_sem *)lock); + } +} + +/*! + * env_create_sync_lock + * + * Creates a synchronization lock primitive. It is used + * when signal has to be sent from the interrupt context to main + * thread context. + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_sync_lock(void **lock, int32_t state, void *context) +{ + return env_create_mutex(lock, state, context); /* state=1 .. initially free */ +} +#else +int32_t env_create_sync_lock(void **lock, int32_t state) +{ + return env_create_mutex(lock, state); /* state=1 .. initially free */ +} +#endif + +/*! + * env_delete_sync_lock + * + * Deletes the given lock + * + */ +void env_delete_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_delete_mutex(lock); + } +} + +/*! + * env_acquire_sync_lock + * + * Tries to acquire the lock, if lock is not available then call to + * this function waits for lock to become available. + */ +void env_acquire_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_lock_mutex(lock); + } +} + +/*! + * env_release_sync_lock + * + * Releases the given lock. + */ +void env_release_sync_lock(void *lock) +{ + if (lock != ((void *)0)) + { + env_unlock_mutex(lock); + } +} + +/*! + * env_sleep_msec + * + * Suspends the calling thread for given time , in msecs. + */ +void env_sleep_msec(uint32_t num_msec) +{ + k_sleep(num_msec); +} + +/*! + * env_register_isr + * + * Registers interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + * @param data - interrupt handler data (virtqueue) + */ +void env_register_isr(uint32_t vector_id, void *data) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = data; + } +} + +/*! 
+ * env_unregister_isr + * + * Unregisters interrupt handler data for the given interrupt vector. + * + * @param vector_id - virtual interrupt vector number + */ +void env_unregister_isr(uint32_t vector_id) +{ + RL_ASSERT(vector_id < ISR_COUNT); + if (vector_id < ISR_COUNT) + { + isr_table[vector_id].data = ((void *)0); + } +} + +/*! + * env_enable_interrupt + * + * Enables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_enable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_enable(vector_id); +} + +/*! + * env_disable_interrupt + * + * Disables the given interrupt + * + * @param vector_id - virtual interrupt vector number + */ + +void env_disable_interrupt(uint32_t vector_id) +{ + (void)platform_interrupt_disable(vector_id); +} + +/*! + * env_map_memory + * + * Enables memory mapping for given memory region. + * + * @param pa - physical address of memory + * @param va - logical address of memory + * @param size - memory size + * param flags - flags for cache/uncached and access type + */ + +void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags) +{ + platform_map_mem_region(va, pa, size, flags); +} + +/*! + * env_disable_cache + * + * Disables system caches. + * + */ + +void env_disable_cache(void) +{ + platform_cache_all_flush_invalidate(); + platform_cache_disable(); +} + +/*========================================================= */ +/* Util data / functions */ + +void env_isr(uint32_t vector) +{ + struct isr_info *info; + RL_ASSERT(vector < ISR_COUNT); + if (vector < ISR_COUNT) + { + info = &isr_table[vector]; + virtqueue_notification((struct virtqueue *)info->data); + } +} + +/* + * env_create_queue + * + * Creates a message queue. 
+ * + * @param queue - pointer to created queue + * @param length - maximum number of elements in the queue + * @param element_size - queue element size in bytes + * @param queue_static_storage - pointer to queue static storage buffer + * @param queue_static_context - pointer to queue static context + * + * @return - status of function execution + */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +int32_t env_create_queue(void **queue, + int32_t length, + int32_t element_size, + uint8_t *queue_static_storage, + rpmsg_static_queue_ctxt *queue_static_context) +#else +int32_t env_create_queue(void **queue, int32_t length, int32_t element_size) +#endif +{ + struct k_msgq *queue_ptr = ((void *)0); + char *msgq_buffer_ptr = ((void *)0); + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + queue_ptr = (struct k_msgq *)queue_static_context; + msgq_buffer_ptr = (char *)queue_static_storage; +#else + queue_ptr = (struct k_msgq *)env_allocate_memory(sizeof(struct k_msgq)); + msgq_buffer_ptr = (char *)env_allocate_memory(length * element_size); +#endif + if ((queue_ptr == ((void *)0)) || (msgq_buffer_ptr == ((void *)0))) + { + return -1; + } + k_msgq_init(queue_ptr, msgq_buffer_ptr, element_size, length); + /* Becasue k_msgq_init() does not return any status, we do not know if all is OK or not. + If something would not be OK dynamically allocated memory has to be freed here. */ + + *queue = (void *)queue_ptr; + return 0; +} + +/*! + * env_delete_queue + * + * Deletes the message queue. + * + * @param queue - queue to delete + */ + +void env_delete_queue(void *queue) +{ + k_msgq_purge((struct k_msgq *)queue); +#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)) + env_free_memory(((struct k_msgq *)queue)->buffer_start); + env_free_memory(queue); +#endif +} + +/*! + * env_put_queue + * + * Put an element in a queue. 
+ * + * @param queue - queue to put element in + * @param msg - pointer to the message to be put into the queue + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (env_in_isr() != 0) + { + timeout_ms = 0; /* force timeout == 0 when in ISR */ + } + + if (0 == k_msgq_put((struct k_msgq *)queue, msg, timeout_ms)) + { + return 1; + } + return 0; +} + +/*! + * env_get_queue + * + * Get an element out of a queue. + * + * @param queue - queue to get element from + * @param msg - pointer to a memory to save the message + * @param timeout_ms - timeout in ms + * + * @return - status of function execution + */ + +int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms) +{ + if (env_in_isr() != 0) + { + timeout_ms = 0; /* force timeout == 0 when in ISR */ + } + + if (0 == k_msgq_get((struct k_msgq *)queue, msg, timeout_ms)) + { + return 1; + } + return 0; +} + +/*! + * env_get_current_queue_size + * + * Get current queue size. + * + * @param queue - queue pointer + * + * @return - Number of queued items in the queue + */ + +int32_t env_get_current_queue_size(void *queue) +{ + return k_msgq_num_used_get((struct k_msgq *)queue); +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform.c new file mode 100755 index 00000000..b4aaa290 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. 
+ *
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include
+#include
+#include "rpmsg_platform.h"
+#include "rpmsg_env.h"
+
+#include "board.h"
+#include "mu_imx.h"
+
+#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
+#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
+#endif
+
+#define APP_MU_IRQ_PRIORITY (3U)
+
+static int32_t isr_counter = 0;
+static int32_t disable_counter = 0;
+static void *platform_lock;
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+static LOCK_STATIC_CONTEXT platform_lock_static_ctxt;
+#endif
+
+static void platform_global_isr_disable(void)
+{
+ __asm volatile("cpsid i");
+}
+
+static void platform_global_isr_enable(void)
+{
+ __asm volatile("cpsie i");
+}
+
+int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data)
+{
+ /* Register ISR to environment layer */
+ env_register_isr(vector_id, isr_data);
+
+ /* Prepare the MU Hardware, enable channel 1 interrupt */
+ env_lock_mutex(platform_lock);
+
+ RL_ASSERT(0 <= isr_counter);
+ if (isr_counter == 0)
+ {
+ MU_EnableRxFullInt(MUB, RPMSG_MU_CHANNEL);
+ }
+ isr_counter++;
+
+ env_unlock_mutex(platform_lock);
+
+ return 0;
+}
+
+int32_t platform_deinit_interrupt(uint32_t vector_id)
+{
+ /* Prepare the MU Hardware */
+ env_lock_mutex(platform_lock);
+
+ RL_ASSERT(0 < isr_counter);
+ isr_counter--;
+ if (isr_counter == 0)
+ {
+ MU_DisableRxFullInt(MUB, RPMSG_MU_CHANNEL);
+ }
+
+ /* Unregister ISR from environment layer */
+ env_unregister_isr(vector_id);
+
+ env_unlock_mutex(platform_lock);
+
+ return 0;
+}
+
+void platform_notify(uint32_t vector_id)
+{
+ /* As Linux suggests, use MU->Data Channel 1 as communication channel */
+ uint32_t msg = (RL_GET_Q_ID(vector_id)) << 16;
+
+ env_lock_mutex(platform_lock);
+ MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg);
+ env_unlock_mutex(platform_lock);
+}
+
+/*
+ * MU Interrupt RPMsg handler
+ */
+void rpmsg_handler(void)
+{
+ uint32_t msg, channel;
+
+ if (MU_TryReceiveMsg(MUB,
RPMSG_MU_CHANNEL, &msg) == kStatus_MU_Success) + { + channel = msg >> 16; + env_isr(channel); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_M4_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. 
+ * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_M4_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(BOARD_MU_BASE_ADDR); + NVIC_SetPriority(BOARD_MU_IRQ_NUM, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(BOARD_MU_IRQ_NUM); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in 
multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform_zephyr_ipm.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform_zephyr_ipm.c new file mode 100755 index 00000000..c946a578 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx6sx_m4/rpmsg_platform_zephyr_ipm.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" +#include + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif +static struct device *ipm_handle = ((void *)0); + +void platform_ipm_callback(void *context, u32_t id, volatile void *data) +{ + if (id != RPMSG_MU_CHANNEL) + { + return; + } + + /* Data to be transmitted from Master */ + if (*(uint32_t *)data == 0U) + { + env_isr(0); + } + + /* Data to be received from Master */ + if (*(uint32_t *)data == 0x10000U) + { + env_isr(1); + } +} + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t 
platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if ((isr_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + int32_t status; + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_IMX6SX_M4_LINK_ID: + env_lock_mutex(platform_lock); + uint32_t data = (RL_GET_Q_ID(vector_id) << 16); + RL_ASSERT(ipm_handle); + do + { + status = ipm_send(ipm_handle, 0, RPMSG_MU_CHANNEL, &data, sizeof(uint32_t)); + } while (status == EBUSY); + env_unlock_mutex(platform_lock); + return; + + default: + return; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (0 != k_is_in_isr()); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 1); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* Get IPM device handle */ + ipm_handle = device_get_binding(DT_NXP_IMX_MU_MU_B_LABEL); + if (!ipm_handle) + { + return -1; + } + + /* Register application callback with no context */ + ipm_register_callback(ipm_handle, platform_ipm_callback, ((void *)0)); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = 
((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform.c new file mode 100755 index 00000000..b4aaa290 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "board.h" +#include "mu_imx.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableRxFullInt(MUB, RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableRxFullInt(MUB, RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + 
env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channle 1 as communication channel */ + uint32_t msg = (RL_GET_Q_ID(vector_id)) << 16; + + env_lock_mutex(platform_lock); + MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +void rpmsg_handler(void) +{ + uint32_t msg, channel; + + if (MU_TryReceiveMsg(MUB, RPMSG_MU_CHANNEL, &msg) == kStatus_MU_Success) + { + channel = msg >> 16; + env_isr(channel); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_M4_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_M4_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be 
initialized before rpmsg init is called + */ + MU_Init(BOARD_MU_BASE_ADDR); + NVIC_SetPriority(BOARD_MU_IRQ_NUM, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(BOARD_MU_IRQ_NUM); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform_zephyr_ipm.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform_zephyr_ipm.c new file mode 100755 index 00000000..5458d347 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7d_m4/rpmsg_platform_zephyr_ipm.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" +#include + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif +static struct device *ipm_handle = ((void *)0); + +void platform_ipm_callback(void *context, u32_t id, volatile void *data) +{ + if (id != RPMSG_MU_CHANNEL) + { + return; + } + + /* Data to be transmitted from Master */ + if (*(uint32_t *)data == 0U) + { + env_isr(0); + } + + /* Data to be received from Master */ + if (*(uint32_t *)data == 0x10000U) + { + env_isr(1); + } +} + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if ((isr_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + int32_t status; + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_IMX7D_M4_LINK_ID: + env_lock_mutex(platform_lock); + uint32_t data = (RL_GET_Q_ID(vector_id) << 
16); + RL_ASSERT(ipm_handle); + do + { + status = ipm_send(ipm_handle, 0, RPMSG_MU_CHANNEL, &data, sizeof(uint32_t)); + } while (status == EBUSY); + env_unlock_mutex(platform_lock); + return; + + default: + return; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (0 != k_is_in_isr()); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 1); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* Get IPM device handle */ + ipm_handle = device_get_binding(DT_NXP_IMX_MU_MU_B_LABEL); + if (!ipm_handle) + { + return -1; + } + + /* Register application callback with no context */ + ipm_register_callback(ipm_handle, platform_ipm_callback, ((void *)0)); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = 
((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7ulp_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7ulp_m4/rpmsg_platform.c new file mode 100755 index 00000000..e21f4dec --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx7ulp_m4/rpmsg_platform.c @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) +#define APP_MU_A7_SIDE_READY (0x1U) +#define APP_MU_A7_WAIT_INTERVAL_MS (10U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(MUA, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); 
+ isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(MUA, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(MUA, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t MU_A_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(MUA)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(MUA, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr(channel >> 16); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. 
+ * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_A_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_A_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * 
platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(MUA); + NVIC_SetPriority(MU_A_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(MU_A_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mm_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mm_m4/rpmsg_platform.c new file mode 100755 index 00000000..312576a0 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mm_m4/rpmsg_platform.c @@ -0,0 +1,293 @@ +/* + * Copyright 2017-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t MU_M4_IRQHandler(void) +{ + 
uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(MUB)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(MUB, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr(channel >> 16); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_M4_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_M4_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(MUB); + NVIC_SetPriority(MU_M4_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(MU_M4_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && 
(RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mn_m7/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mn_m7/rpmsg_platform.c new file mode 100755 index 00000000..33344d2a --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mn_m7/rpmsg_platform.c @@ -0,0 +1,293 @@ +/* + * Copyright 2017-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(MUB, (1UL << 27U) >> 
RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t MU_M7_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(MUB)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(MUB, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr(channel >> 16); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. 
+ */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_M7_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_M7_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(MUB); + NVIC_SetPriority(MU_M7_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(MU_M7_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git 
a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mp_m7/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mp_m7/rpmsg_platform.c new file mode 100755 index 00000000..3afc77d7 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mp_m7/rpmsg_platform.c @@ -0,0 +1,293 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + 
env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t MU1_M7_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(MUB)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(MUB, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr(channel >> 16); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU1_M7_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU1_M7_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must 
be initialized before rpmsg init is called + */ + MU_Init(MUB); + NVIC_SetPriority(MU1_M7_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(MU1_M7_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mq_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mq_m4/rpmsg_platform.c new file mode 100755 index 00000000..9f57207e --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8mq_m4/rpmsg_platform.c @@ -0,0 +1,285 @@ +/* + * Copyright 2017-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(MUB, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(MUB, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t MU_M4_IRQHandler(void) +{ + 
uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(MUB)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(MUB, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr(channel >> 16); + } + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MU_M4_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MU_M4_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(MUB); + NVIC_SetPriority(MU_M4_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(MU_M4_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git 
a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qm_m4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qm_m4/rpmsg_platform.c new file mode 100755 index 00000000..645d65c7 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qm_m4/rpmsg_platform.c @@ -0,0 +1,492 @@ +/* + * Copyright 2017-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" +#include "fsl_irqsteer.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +/* The MU instance used for CM4 and A core communication */ +#if defined(MIMX8QM_CM4_CORE0) +#define APP_M4_A_MU LSIO__MU5_B +#define APP_M4_A_MU_IRQn LSIO_MU5_INT_B_IRQn +#elif defined(MIMX8QM_CM4_CORE1) +#define APP_M4_A_MU LSIO__MU6_B +#define APP_M4_A_MU_IRQn LSIO_MU6_INT_B_IRQn +#else +#error "Unsupported CPU core!" 
+#endif + +/* The MU instance used for the communication between two CM4 cores */ +/* + ------------------------------------------------------- + Platform | M4_0 PT | M4_1 PT | A core PT + i.MX8QM/QX | LSIO MU5_B | - | LSIO MU5_A + i.MX8QM | - | LSIO MU6_B | LSIO MU6_A + i.MX8QM | LSIO MU7_A | LSIO MU7_B | - + ------------------------------------------------------- +*/ +#if defined(MIMX8QM_CM4_CORE0) +#define APP_M4_M4_MU LSIO__MU7_A +#define APP_M4_M4_MU_IRQn LSIO_MU7_INT_A_IRQn +#elif defined(MIMX8QM_CM4_CORE1) +#define APP_M4_M4_MU LSIO__MU7_B +#define APP_M4_M4_MU_IRQn LSIO_MU7_INT_B_IRQn +#endif + +#define APP_M4_MU_NVIC_IRQn IRQSTEER_3_IRQn + +/* NVIC IRQn that correspond to the LSIO MU IRQn is obtained with the following + * calculation: + * + * NVIC_IRQn = IRQSTEER_0_IRQn + (LSIO_MU_IRQn - FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) / 64 + * + * LSIO_MU_IRQn min = LSIO_MU0_INT_IRQn = 259 + * LSIO_MU_IRQn max = LSIO_MU13_INT_B_IRQn = 291 + * + * With all the LSIO MUs, the NVIC_IRQn = 35, that corresponds to IRQSTEER_3_IRQn + */ + +static int32_t isr_counter0 = 0; /* RL_PLATFORM_IMX8QM_M4_A_USER_LINK_ID isr counter */ +static int32_t isr_counter1 = 0; /* RL_PLATFORM_IMX8QM_M4_M4_USER_LINK_ID isr counter */ +static int32_t disable_counter0 = 0; +static int32_t disable_counter1 = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + switch (RL_GET_COM_ID(vector_id)) + { + case RL_PLATFORM_IMX8QM_M4_A_COM_ID: + RL_ASSERT(0 <= isr_counter0); + if 
(isr_counter0 == 0) + { + MU_EnableInterrupts(APP_M4_A_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter0++; + break; + case RL_PLATFORM_IMX8QM_M4_M4_COM_ID: + RL_ASSERT(0 <= isr_counter1); + if (isr_counter1 == 0) + { + MU_EnableInterrupts(APP_M4_M4_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter1++; + break; + default: + /* All the cases have been listed above, the default clause should not be reached. */ + break; + } + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + switch (RL_GET_COM_ID(vector_id)) + { + case RL_PLATFORM_IMX8QM_M4_A_COM_ID: + RL_ASSERT(0 < isr_counter0); + isr_counter0--; + if (isr_counter0 == 0) + { + MU_DisableInterrupts(APP_M4_A_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + break; + case RL_PLATFORM_IMX8QM_M4_M4_COM_ID: + RL_ASSERT(0 < isr_counter1); + isr_counter1--; + if (isr_counter1 == 0) + { + MU_DisableInterrupts(APP_M4_M4_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + break; + default: + /* All the cases have been listed above, the default clause should not be reached. */ + break; + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* Only vring id and queue id is needed in msg */ + uint32_t msg = RL_GEN_MU_MSG(vector_id); + + env_lock_mutex(platform_lock); + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + switch (RL_GET_COM_ID(vector_id)) + { + case RL_PLATFORM_IMX8QM_M4_A_COM_ID: + MU_SendMsg(APP_M4_A_MU, RPMSG_MU_CHANNEL, msg); + break; + case RL_PLATFORM_IMX8QM_M4_M4_COM_ID: + MU_SendMsg(APP_M4_M4_MU, RPMSG_MU_CHANNEL, msg); + break; + default: + /* All the cases have been listed above, the default clause should not be reached. 
*/ + break; + } + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +#if defined(MIMX8QM_CM4_CORE0) +int32_t LSIO_MU5_INT_B_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(APP_M4_A_MU)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(APP_M4_A_MU, RPMSG_MU_CHANNEL); /* Read message from RX register. */ + env_isr((uint32_t)((channel >> 16) | (RL_PLATFORM_IMX8QM_M4_A_COM_ID << 3))); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} + +#elif defined(MIMX8QM_CM4_CORE1) +int32_t LSIO_MU6_INT_B_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(APP_M4_A_MU)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(APP_M4_A_MU, RPMSG_MU_CHANNEL); /* Read message from RX register. */ + env_isr((uint32_t)((channel >> 16) | (RL_PLATFORM_IMX8QM_M4_A_COM_ID << 3))); + } + + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. 
+ * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#endif + +#if defined(MIMX8QM_CM4_CORE0) +int32_t LSIO_MU7_INT_A_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(APP_M4_M4_MU)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(APP_M4_M4_MU, RPMSG_MU_CHANNEL); /* Read message from RX register. */ + env_isr((uint32_t)((channel >> 16) | (RL_PLATFORM_IMX8QM_M4_M4_COM_ID << 3))); + } + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} +#elif defined(MIMX8QM_CM4_CORE1) +int32_t LSIO_MU7_INT_B_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(APP_M4_M4_MU)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(APP_M4_M4_MU, RPMSG_MU_CHANNEL); /* Read message from RX register. */ + env_isr((uint32_t)((channel >> 16) | (RL_PLATFORM_IMX8QM_M4_M4_COM_ID << 3))); + } + + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. 
+ * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + + return 0; +} +#endif +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + platform_global_isr_disable(); + + switch (RL_GET_COM_ID(vector_id)) + { + case RL_PLATFORM_IMX8QM_M4_A_COM_ID: + RL_ASSERT(0 < disable_counter0); + disable_counter0--; + if (disable_counter0 == 0) + { + NVIC_EnableIRQ(APP_M4_MU_NVIC_IRQn); + } + break; + case RL_PLATFORM_IMX8QM_M4_M4_COM_ID: + RL_ASSERT(0 < disable_counter1); + disable_counter1--; + if (disable_counter1 == 0) + { + NVIC_EnableIRQ(APP_M4_MU_NVIC_IRQn); + } + break; + default: + /* All the cases have been listed above, the default clause should not be reached. 
*/ + break; + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + switch (RL_GET_COM_ID(vector_id)) + { + case RL_PLATFORM_IMX8QM_M4_A_COM_ID: + RL_ASSERT(0 <= disable_counter0); + if (disable_counter0 == 0) + { + NVIC_DisableIRQ(APP_M4_MU_NVIC_IRQn); + } + disable_counter0++; + break; + case RL_PLATFORM_IMX8QM_M4_M4_COM_ID: + RL_ASSERT(0 <= disable_counter1); + if (disable_counter1 == 0) + { + NVIC_DisableIRQ(APP_M4_MU_NVIC_IRQn); + } + disable_counter1++; + break; + default: + /* All the cases have been listed above, the default clause should not be reached. 
*/ + break; + } + + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(APP_M4_A_MU); + NVIC_SetPriority(APP_M4_MU_NVIC_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(APP_M4_MU_NVIC_IRQn); + IRQSTEER_EnableInterrupt(IRQSTEER, APP_M4_A_MU_IRQn); + + /* Prepare for the MU Interrupt for the MU used between two M4s*/ + MU_Init(APP_M4_M4_MU); + NVIC_SetPriority(APP_M4_MU_NVIC_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(APP_M4_MU_NVIC_IRQn); + IRQSTEER_EnableInterrupt(IRQSTEER, APP_M4_M4_MU_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + MU_Deinit(APP_M4_A_MU); + IRQSTEER_DisableInterrupt(IRQSTEER, APP_M4_A_MU_IRQn); + MU_Deinit(APP_M4_M4_MU); + IRQSTEER_DisableInterrupt(IRQSTEER, APP_M4_M4_MU_IRQn); + + 
/* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qx_cm4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qx_cm4/rpmsg_platform.c new file mode 100755 index 00000000..034dcfb3 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imx8qx_cm4/rpmsg_platform.c @@ -0,0 +1,306 @@ +/* + * Copyright 2017-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#include "fsl_irqsteer.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +#define APP_M4_MU LSIO__MU5_B +#define APP_M4_MU_IRQn LSIO_MU5_INT_B_IRQn +#define APP_M4_MU_NVIC_IRQn IRQSTEER_3_IRQn + +/* NVIC IRQn that correspond to the LSIO MU IRQn is obtained with the following + * calculation: + * + * NVIC_IRQn = IRQSTEER_0_IRQn + (LSIO_MU_IRQn - FSL_FEATURE_IRQSTEER_IRQ_START_INDEX) / 64 + * + * LSIO_MU_IRQn min = LSIO_MU0_INT_IRQn = 259 + * LSIO_MU_IRQn max = LSIO_MU13_INT_B_IRQn = 291 + * + * With all the LSIO MUs, the NVIC_IRQn = 35, that corresponds to IRQSTEER_3_IRQn + */ + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + 
env_register_isr(vector_id, isr_data); + + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + MU_EnableInterrupts(APP_M4_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + MU_DisableInterrupts(APP_M4_MU, (1UL << 27U) >> RPMSG_MU_CHANNEL); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channel 1 as communication channel */ + uint32_t msg = (uint32_t)(vector_id << 16); + + env_lock_mutex(platform_lock); + MU_SendMsg(APP_M4_MU, RPMSG_MU_CHANNEL, msg); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +int32_t LSIO_MU5_INT_B_IRQHandler(void) +{ + uint32_t channel; + + if ((((1UL << 27U) >> RPMSG_MU_CHANNEL) & MU_GetStatusFlags(APP_M4_MU)) != 0UL) + { + channel = MU_ReceiveMsgNonBlocking(APP_M4_MU, RPMSG_MU_CHANNEL); // Read message from RX register. + env_isr((uint32_t)(channel >> 16)); + } + + return 0; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. 
+ */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(APP_M4_MU_NVIC_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(APP_M4_MU_NVIC_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + MU_Init(APP_M4_MU); + NVIC_SetPriority(APP_M4_MU_NVIC_IRQn, APP_MU_IRQ_PRIORITY); + IRQSTEER_EnableInterrupt(IRQSTEER, APP_M4_MU_IRQn); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + MU_Deinit(APP_M4_MU); + IRQSTEER_DisableInterrupt(IRQSTEER, APP_M4_MU_IRQn); + + /* Delete lock used in multi-instanced RPMsg */ + 
env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1160/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1160/rpmsg_platform.c new file mode 100755 index 00000000..4cf54a97 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1160/rpmsg_platform.c @@ -0,0 +1,357 @@ +/* + * Copyright 2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +static void mu_isr(MU_Type *base) +{ + uint32_t flags; + flags = MU_GetStatusFlags(base); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} + +#if defined(FSL_FEATURE_MU_SIDE_A) +int32_t MUA_IRQHandler(void) +{ + mu_isr(MUA); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. 
+ * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#elif defined(FSL_FEATURE_MU_SIDE_B) +int32_t MUB_IRQHandler(void) +{ + mu_isr(MUB); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#endif +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_EnableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_EnableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_DisableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_DisableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + + /* 
Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); +#else +/* Write directly into the MU Control Register to trigger General Purpose Interrupt Request (GIR). + No need to wait until the previous interrupt is processed because the same value + of the virtqueue ID is used for GIR mask when triggering the ISR for the receiver side. + The whole queue of received buffers for associated virtqueue is then handled in the ISR + on the receiver side. */ +#if defined(FSL_FEATURE_MU_SIDE_A) + (void)MU_TriggerInterrupts(MUA, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#elif defined(FSL_FEATURE_MU_SIDE_B) + (void)MU_TriggerInterrupts(MUB, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#endif +#endif + env_unlock_mutex(platform_lock); +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency - not implemented in system_MIMXRT1166_cm[4,7].c */ + /* SystemCoreClockUpdate(); */ + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + NVIC_EnableIRQ(MUA_IRQn); +#elif defined(FSL_FEATURE_MU_SIDE_B) + NVIC_EnableIRQ(MUB_IRQn); +#endif + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + NVIC_DisableIRQ(MUA_IRQn); + NVIC_SetPriority(MUA_IRQn, 2); +#elif defined(FSL_FEATURE_MU_SIDE_B) + NVIC_DisableIRQ(MUB_IRQn); +#endif + } + + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * 
Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* The MU peripheral driver is not initialized here because it covers also + the secondary core booting controls and it needs to be initialized earlier + in the application code */ + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1170/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1170/rpmsg_platform.c new file mode 100755 index 00000000..c98d7e0a --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt1170/rpmsg_platform.c @@ -0,0 +1,357 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +static void mu_isr(MU_Type *base) +{ + uint32_t flags; + flags = MU_GetStatusFlags(base); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} + +#if defined(FSL_FEATURE_MU_SIDE_A) +int32_t MUA_IRQHandler(void) +{ + mu_isr(MUA); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. 
+ * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#elif defined(FSL_FEATURE_MU_SIDE_B) +int32_t MUB_IRQHandler(void) +{ + mu_isr(MUB); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#endif +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_EnableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_EnableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_DisableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_DisableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + + /* 
Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); +#else +/* Write directly into the MU Control Register to trigger General Purpose Interrupt Request (GIR). + No need to wait until the previous interrupt is processed because the same value + of the virtqueue ID is used for GIR mask when triggering the ISR for the receiver side. + The whole queue of received buffers for associated virtqueue is then handled in the ISR + on the receiver side. */ +#if defined(FSL_FEATURE_MU_SIDE_A) + (void)MU_TriggerInterrupts(MUA, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#elif defined(FSL_FEATURE_MU_SIDE_B) + (void)MU_TriggerInterrupts(MUB, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#endif +#endif + env_unlock_mutex(platform_lock); +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency - not implemented in system_MIMXRT1176_cm[4,7].c */ + /* SystemCoreClockUpdate(); */ + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + NVIC_EnableIRQ(MUA_IRQn); +#elif defined(FSL_FEATURE_MU_SIDE_B) + NVIC_EnableIRQ(MUB_IRQn); +#endif + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + NVIC_DisableIRQ(MUA_IRQn); + NVIC_SetPriority(MUA_IRQn, 2); +#elif defined(FSL_FEATURE_MU_SIDE_B) + NVIC_DisableIRQ(MUB_IRQn); +#endif + } + + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * 
Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* The MU peripheral driver is not initialized here because it covers also + the secondary core booting controls and it needs to be initialized earlier + in the application code */ + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_fusionf1/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_fusionf1/rpmsg_platform.c new file mode 100755 index 00000000..776fad25 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_fusionf1/rpmsg_platform.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" +#include + +#ifdef SDK_OS_BAREMETAL +#include +#include +#else +#include +#endif + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +void MU_B_IRQHandler(void *arg) +{ + uint32_t flags; + flags = MU_GetStatusFlags(MUB); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(MUB, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(MUB, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + + if (isr_counter < 2) + { + MU_EnableInterrupts(MUB, 1UL << (31UL - vector_id)); + } + + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + + if (isr_counter < 2) + { + MU_DisableInterrupts(MUB, 1UL << (31UL - vector_id)); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); + (void)MU_TriggerInterrupts(MUB, 1UL << (19UL - RL_GET_Q_ID(vector_id))); + env_unlock_mutex(platform_lock); +} + +/** 
+ * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has approx. 6 cycles */ + loop = SystemCoreClock / 6U / 1000U * num_msec; + + while (loop > 0U) + { + asm("nop"); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (xthal_get_interrupt() & xthal_get_intenable()); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + +#ifdef SDK_OS_BAREMETAL + _xtos_interrupt_enable(6); +#else + xos_interrupt_enable(6); +#endif + disable_counter--; + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + +#ifdef SDK_OS_BAREMETAL + _xtos_interrupt_disable(6); +#else + xos_interrupt_disable(6); +#endif + disable_counter++; + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + /* Register interrupt handler for MU_B on HiFi4 */ +#ifdef SDK_OS_BAREMETAL + _xtos_set_interrupt_handler(6, MU_B_IRQHandler); +#else + xos_register_interrupt_handler(6, MU_B_IRQHandler, ((void *)0)); +#endif + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ +#ifdef SDK_OS_BAREMETAL + _xtos_set_interrupt_handler(6, ((void *)0)); +#else + xos_register_interrupt_handler(6, ((void *)0), ((void *)0)); +#endif + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git 
a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_m33/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_m33/rpmsg_platform.c new file mode 100755 index 00000000..79d00fb3 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt500_m33/rpmsg_platform.c @@ -0,0 +1,310 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +/* The MU instance used for CM33 and DSP core communication */ +#define APP_MU MUA +#define APP_MU_IRQn MU_A_IRQn + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} + +#else +void MU_A_IRQHandler(void) +{ + uint32_t flags; + flags = MU_GetStatusFlags(APP_MU); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t 
platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter < 2) + { + MU_EnableInterrupts(APP_MU, 1UL << (31UL - vector_id)); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter < 2) + { + MU_DisableInterrupts(APP_MU, 1UL << (31UL - vector_id)); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + (void)MCMGR_TriggerEvent(kMCMGR_RemoteRPMsgEvent, RL_GET_Q_ID(vector_id)); + env_unlock_mutex(platform_lock); +#else + (void)MU_TriggerInterrupts(APP_MU, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#endif + env_unlock_mutex(platform_lock); +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(APP_MU_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(APP_MU_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Translate CM33 addresses to DSP addresses + * + */ +uint32_t platform_vatopa(void *addr) +{ + return (((uint32_t)(char *)addr & 0x0FFFFFFFu) + 0x800000u); +} + +/** + * platform_patova + * + * Translate DSP addresses to CM33 addresses + * + */ +void *platform_patova(uint32_t addr) +{ + return (void *)(char *)((addr - 0x00800000u) | 0x20000000u); +} + +/** + * platform_init + * + * 
platform/environment init + */ +int32_t platform_init(void) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } +#else + MU_Init(APP_MU); + NVIC_SetPriority(APP_MU_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(APP_MU_IRQn); +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + MU_Deinit(APP_MU); + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_hifi4/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_hifi4/rpmsg_platform.c new file mode 100755 index 00000000..776fad25 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_hifi4/rpmsg_platform.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" +#include + +#ifdef SDK_OS_BAREMETAL +#include +#include +#else +#include +#endif + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +void MU_B_IRQHandler(void *arg) +{ + uint32_t flags; + flags = MU_GetStatusFlags(MUB); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(MUB, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(MUB, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + + if (isr_counter < 2) + { + MU_EnableInterrupts(MUB, 1UL << (31UL - vector_id)); + } + + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + + if (isr_counter < 2) + { + MU_DisableInterrupts(MUB, 1UL << (31UL - vector_id)); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); + (void)MU_TriggerInterrupts(MUB, 1UL << (19UL - RL_GET_Q_ID(vector_id))); + env_unlock_mutex(platform_lock); +} + +/** 
+ * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has approx. 6 cycles */ + loop = SystemCoreClock / 6U / 1000U * num_msec; + + while (loop > 0U) + { + asm("nop"); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (xthal_get_interrupt() & xthal_get_intenable()); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + +#ifdef SDK_OS_BAREMETAL + _xtos_interrupt_enable(6); +#else + xos_interrupt_enable(6); +#endif + disable_counter--; + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + +#ifdef SDK_OS_BAREMETAL + _xtos_interrupt_disable(6); +#else + xos_interrupt_disable(6); +#endif + disable_counter++; + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + /* Register interrupt handler for MU_B on HiFi4 */ +#ifdef SDK_OS_BAREMETAL + _xtos_set_interrupt_handler(6, MU_B_IRQHandler); +#else + xos_register_interrupt_handler(6, MU_B_IRQHandler, ((void *)0)); +#endif + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ +#ifdef SDK_OS_BAREMETAL + _xtos_set_interrupt_handler(6, ((void *)0)); +#else + xos_register_interrupt_handler(6, ((void *)0), ((void *)0)); +#endif + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git 
a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_m33/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_m33/rpmsg_platform.c new file mode 100755 index 00000000..be68f480 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/imxrt600_m33/rpmsg_platform.c @@ -0,0 +1,310 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +/* The MU instance used for CM33 and DSP core communication */ +#define APP_MU MUA +#define APP_MU_IRQn MU_A_IRQn + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} + +#else +void MU_A_IRQHandler(void) +{ + uint32_t flags; + flags = MU_GetStatusFlags(APP_MU); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t 
platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter < 2) + { + MU_EnableInterrupts(APP_MU, 1UL << (31UL - vector_id)); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter < 2) + { + MU_DisableInterrupts(APP_MU, 1UL << (31UL - vector_id)); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + (void)MCMGR_TriggerEvent(kMCMGR_RemoteRPMsgEvent, RL_GET_Q_ID(vector_id)); + env_unlock_mutex(platform_lock); +#else + (void)MU_TriggerInterrupts(APP_MU, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#endif + env_unlock_mutex(platform_lock); +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 
1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(APP_MU_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(APP_MU_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && 
(RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } +#else + MU_Init(APP_MU); + NVIC_SetPriority(APP_MU_IRQn, APP_MU_IRQ_PRIORITY); + NVIC_EnableIRQ(APP_MU_IRQn); +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + MU_Deinit(APP_MU); + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c new file mode 100755 index 00000000..0713e0df --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "x2600_hal.h" + + +/* #include "board.h" */ + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +#define APP_MU_IRQ_PRIORITY (3U) + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +static void platform_global_isr_disable(void) +{ +} + +static void platform_global_isr_enable(void) +{ +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id,isr_data); + /* Prepare the MU Hardware, enable channel 1 interrupt */ + env_lock_mutex(platform_lock); + + // add + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + // add dm + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + /* As Linux suggests, use MU->Data Channle 1 as communication channel */ + uint32_t msg = (RL_GET_Q_ID(vector_id)) << 16; + //prom_printk("%s %d msg=%d ====\r\n",__func__,__LINE__,msg); + env_lock_mutex(platform_lock); + // add write kick register + LL_RISC_CCU_Mbox_Sendmsg(RISC_CCU_Instance, vector_id+1); + env_unlock_mutex(platform_lock); +} + +/* + * MU Interrrupt RPMsg handler + */ +void rpmsg_handler(uint32_t msg) +{ + if(msg >= 1) + msg -= 1; + env_isr(msg); + return; +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. 
+ */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + //RL_ASSERT(0 < disable_counter); + //config enable mbx irq + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + // config disable mbx irq + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* + * Prepare for the MU Interrupt + * MU must be initialized before rpmsg init is called + */ + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 
1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/k32l3a6/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/k32l3a6/rpmsg_platform.c new file mode 100755 index 00000000..8d2b4604 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/k32l3a6/rpmsg_platform.c @@ -0,0 +1,357 @@ +/* + * Copyright 2019-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mu.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +static void mu_isr(MU_Type *base) +{ + uint32_t flags; + flags = MU_GetStatusFlags(base); + if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL) + { + MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt0Flag); + env_isr(0); + } + if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL) + { + 
MU_ClearStatusFlags(base, (uint32_t)kMU_GenInt1Flag); + env_isr(1); + } +} + +#if defined(FSL_FEATURE_MU_SIDE_A) +int32_t MUA_IRQHandler(void) +{ + mu_isr(MUA); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#elif defined(FSL_FEATURE_MU_SIDE_B) +int32_t MUB_IRQHandler(void) +{ + mu_isr(MUB); + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif + return 0; +} +#endif +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_EnableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_EnableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + 
env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter < 2) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + MU_DisableInterrupts(MUA, 1UL << (31UL - vector_id)); +#elif defined(FSL_FEATURE_MU_SIDE_B) + MU_DisableInterrupts(MUB, 1UL << (31UL - vector_id)); +#endif + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + env_lock_mutex(platform_lock); +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); +#else +/* Write directly into the MU Control Register to trigger General Purpose Interrupt Request (GIR). + No need to wait until the previous interrupt is processed because the same value + of the virtqueue ID is used for GIR mask when triggering the ISR for the receiver side. + The whole queue of received buffers for associated virtqueue is then handled in the ISR + on the receiver side. */ +#if defined(FSL_FEATURE_MU_SIDE_A) + (void)MU_TriggerInterrupts(MUA, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#elif defined(FSL_FEATURE_MU_SIDE_B) + (void)MU_TriggerInterrupts(MUB, 1UL << (19UL - RL_GET_Q_ID(vector_id))); +#endif +#endif + env_unlock_mutex(platform_lock); +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. 
+ */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { +#if defined(FSL_FEATURE_MU_SIDE_A) + NVIC_EnableIRQ(MUA_IRQn); +#elif defined(FSL_FEATURE_MU_SIDE_B) + NVIC_EnableIRQ(MUB_IRQn); +#endif + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ *
+ */
+int32_t platform_interrupt_disable(uint32_t vector_id)
+{
+    RL_ASSERT(0 <= disable_counter);
+
+    platform_global_isr_disable();
+    /* virtqueues use the same NVIC vector
+       if counter is set - the interrupts are disabled */
+    if (disable_counter == 0)
+    {
+#if defined(FSL_FEATURE_MU_SIDE_A)
+        /* Note: priority configuration does not belong in the disable path;
+         * a stray NVIC_SetPriority(MUA_IRQn, 2) was removed here to keep the
+         * MUA branch symmetric with the MUB branch and with
+         * platform_interrupt_enable(). */
+        NVIC_DisableIRQ(MUA_IRQn);
+#elif defined(FSL_FEATURE_MU_SIDE_B)
+        NVIC_DisableIRQ(MUB_IRQn);
+#endif
+    }
+
+    disable_counter++;
+    platform_global_isr_enable();
+    return ((int32_t)vector_id);
+}
+
+/**
+ * platform_map_mem_region
+ *
+ * Dummy implementation
+ *
+ */
+void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags)
+{
+}
+
+/**
+ * platform_cache_all_flush_invalidate
+ *
+ * Dummy implementation
+ *
+ */
+void platform_cache_all_flush_invalidate(void)
+{
+}
+
+/**
+ * platform_cache_disable
+ *
+ * Dummy implementation
+ *
+ */
+void platform_cache_disable(void)
+{
+}
+
+/**
+ * platform_vatopa
+ *
+ * Dummy implementation
+ *
+ */
+uint32_t platform_vatopa(void *addr)
+{
+    return ((uint32_t)(char *)addr);
+}
+
+/**
+ * platform_patova
+ *
+ * Dummy implementation
+ *
+ */
+void *platform_patova(uint32_t addr)
+{
+    return ((void *)(char *)addr);
+}
+
+/**
+ * platform_init
+ *
+ * platform/environment init
+ */
+int32_t platform_init(void)
+{
+    /* The MU peripheral driver is not initialized here because it covers also
+       the secondary core booting controls and it needs to be initialized earlier
+       in the application code */
+
+#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
+    mcmgr_status_t retval = kStatus_MCMGR_Error;
+    retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0));
+    if (kStatus_MCMGR_Success != retval)
+    {
+        return -1;
+    }
+#endif
+
+    /* Create lock used in multi-instanced RPMsg */
+#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
+    if (0 != env_create_mutex(&platform_lock, 1,
&platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5410x/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5410x/rpmsg_platform.c new file mode 100755 index 00000000..a01b31ca --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5410x/rpmsg_platform.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. + * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mailbox.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +void MAILBOX_IRQHandler(void) +{ + mailbox_cpu_id_t cpu_id; +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + cpu_id = kMAILBOX_CM4; +#else + cpu_id = kMAILBOX_CM0Plus; +#endif + + uint32_t value = MAILBOX_GetValue(MAILBOX, cpu_id); + + if ((value & 0x01) != 0UL) + { + 
MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x01); + env_isr(0); + } + if ((value & 0x02) != 0UL) + { + MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x02); + env_isr(1); + } + + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. + * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif +} +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + NVIC_SetPriority(MAILBOX_IRQn, 5); +#else + NVIC_SetPriority(MAILBOX_IRQn, 2); +#endif + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + env_lock_mutex(platform_lock); + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); + env_unlock_mutex(platform_lock); +#else + /* Only single RPMsg-Lite instance (LINK_ID) is defined for this 
dual core device. Extend + this statement in case multiple instances of RPMsg-Lite are needed. */ + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_LPC5410x_M4_M0_LINK_ID: + env_lock_mutex(platform_lock); +/* Write directly into the Mailbox register, no need to wait until the content is cleared + (consumed by the receiver side) because the same value of the virtqueue ID is written + into this register when triggering the ISR for the receiver side. The whole queue of + received buffers for associated virtqueue is handled in the ISR then. */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM0Plus, (1 << RL_GET_Q_ID(vector_id))); +#else + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM4, (1 << RL_GET_Q_ID(vector_id))); +#endif + env_unlock_mutex(platform_lock); + return; + + default: + return; + } +#endif +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MAILBOX_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } 
+#else + MAILBOX_Init(MAILBOX); +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ +/* Important for LPC54102 - do not deinit mailbox, if there + is a pending ISR on the other core! */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM0Plus)) + { + } +#else + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM4)) + { + } +#endif + + MAILBOX_Deinit(MAILBOX); + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform.c new file mode 100755 index 00000000..da6d9b68 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mailbox.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +void MAILBOX_IRQHandler(void) +{ + mailbox_cpu_id_t cpu_id; +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + cpu_id = kMAILBOX_CM4; +#else + cpu_id = kMAILBOX_CM0Plus; +#endif + + uint32_t value = MAILBOX_GetValue(MAILBOX, cpu_id); + + if ((value & 0x01) != 0UL) + { + MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x01); + env_isr(0); + } + if ((value & 0x02) != 0UL) + { + MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x02); + env_isr(1); + } + + /* ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping + * exception return operation might vector to incorrect interrupt. 
+ * For Cortex-M7, if core speed much faster than peripheral register write speed, + * the peripheral interrupt flags may be still set after exiting ISR, this results to + * the same error similar with errata 83869 */ +#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U)) + __DSB(); +#endif +} +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + NVIC_SetPriority(MAILBOX_IRQn, 5); +#else + NVIC_SetPriority(MAILBOX_IRQn, 2); +#endif + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + env_lock_mutex(platform_lock); + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); + env_unlock_mutex(platform_lock); +#else + /* Only single RPMsg-Lite instance (LINK_ID) is defined for this dual core device. Extend + this statement in case multiple instances of RPMsg-Lite are needed. 
*/ + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_LPC5411x_M4_M0_LINK_ID: + env_lock_mutex(platform_lock); +/* Write directly into the Mailbox register, no need to wait until the content is cleared + (consumed by the receiver side) because the same value of the virtqueue ID is written + into this register when triggering the ISR for the receiver side. The whole queue of + received buffers for associated virtqueue is handled in the ISR then. */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM0Plus, (1 << RL_GET_Q_ID(vector_id))); +#else + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM4, (1 << RL_GET_Q_ID(vector_id))); +#endif + env_unlock_mutex(platform_lock); + return; + + default: + return; + } +#endif +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. + */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MAILBOX_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } 
+#else + MAILBOX_Init(MAILBOX); +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ +/* Important for LPC5411x - do not deinit mailbox, if there + is a pending ISR on the other core! */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM0Plus)) + { + } +#else + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM4)) + { + } +#endif + + MAILBOX_Deinit(MAILBOX); + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform_zephyr_ipm.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform_zephyr_ipm.c new file mode 100755 index 00000000..5433cd21 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc5411x/rpmsg_platform_zephyr_ipm.c @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" +#include + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif +static struct device *ipm_handle = ((void *)0); + +void platform_ipm_callback(void *context, u32_t id, volatile void *data) +{ + if (((*(uint32_t *)data) & 0x01) != 0UL) + { + env_isr(0); + } + if (((*(uint32_t *)data) & 0x02) != 0UL) + { + env_isr(1); + } +} + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + + RL_ASSERT(0 < isr_counter); + isr_counter--; + if ((isr_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_LPC5411x_M4_M0_LINK_ID: + env_lock_mutex(platform_lock); + uint32_t data = (1 << RL_GET_Q_ID(vector_id)); + RL_ASSERT(ipm_handle); + ipm_send(ipm_handle, 0, 0, &data, sizeof(uint32_t)); + env_unlock_mutex(platform_lock); + return; 
+ + default: + return; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (0 != k_is_in_isr()); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 1); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if ((disable_counter == 0) && (ipm_handle != ((void *)0))) + { + ipm_set_enabled(ipm_handle, 0); + } + + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} 
+ +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ + /* Get IPM device handle */ + ipm_handle = device_get_binding(DT_NXP_LPC_MAILBOX_0_LABEL); + if (!ipm_handle) + { + return -1; + } + + /* Register application callback with no context */ + ipm_register_callback(ipm_handle, platform_ipm_callback, ((void *)0)); + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc55s69/rpmsg_platform.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc55s69/rpmsg_platform.c new file mode 100755 index 00000000..7720546c --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/lpc55s69/rpmsg_platform.c @@ -0,0 +1,339 @@ +/* + * Copyright 2018-2021 NXP + * All rights reserved. 
+ * + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + +#include "rpmsg_platform.h" +#include "rpmsg_env.h" + +#include "fsl_device_registers.h" +#include "fsl_mailbox.h" + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +#include "mcmgr.h" +#endif + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) +#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0" +#endif + +static int32_t isr_counter = 0; +static int32_t disable_counter = 0; +static void *platform_lock; +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +static LOCK_STATIC_CONTEXT platform_lock_static_ctxt; +#endif + +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) +static void mcmgr_event_handler(uint16_t vring_idx, void *context) +{ + env_isr((uint32_t)vring_idx); +} +#else +void MAILBOX_IRQHandler(void) +{ + mailbox_cpu_id_t cpu_id; +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + cpu_id = kMAILBOX_CM33_Core0; +#else + cpu_id = kMAILBOX_CM33_Core1; +#endif + + uint32_t value = MAILBOX_GetValue(MAILBOX, cpu_id); + + if ((value & 0x01) != 0UL) + { + MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x01); + env_isr(0); + } + if ((value & 0x02) != 0UL) + { + MAILBOX_ClearValueBits(MAILBOX, cpu_id, 0x02); + env_isr(1); + } +} +#endif + +static void platform_global_isr_disable(void) +{ + __asm volatile("cpsid i"); +} + +static void platform_global_isr_enable(void) +{ + __asm volatile("cpsie i"); +} + +int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data) +{ + /* Register ISR to environment layer */ + env_register_isr(vector_id, isr_data); + + env_lock_mutex(platform_lock); + + RL_ASSERT(0 <= isr_counter); + if (isr_counter == 0) + { + NVIC_SetPriority(MAILBOX_IRQn, 5); + } + isr_counter++; + + env_unlock_mutex(platform_lock); + + return 0; +} + +int32_t platform_deinit_interrupt(uint32_t vector_id) +{ + /* Prepare the MU Hardware */ + env_lock_mutex(platform_lock); + 
+ RL_ASSERT(0 < isr_counter); + isr_counter--; + if (isr_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + + /* Unregister ISR from environment layer */ + env_unregister_isr(vector_id); + + env_unlock_mutex(platform_lock); + + return 0; +} + +void platform_notify(uint32_t vector_id) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + env_lock_mutex(platform_lock); + (void)MCMGR_TriggerEventForce(kMCMGR_RemoteRPMsgEvent, (uint16_t)RL_GET_Q_ID(vector_id)); + env_unlock_mutex(platform_lock); +#else + /* Only single RPMsg-Lite instance (LINK_ID) is defined for this dual core device. Extend + this statement in case multiple instances of RPMsg-Lite are needed. */ + switch (RL_GET_LINK_ID(vector_id)) + { + case RL_PLATFORM_LPC55S69_M33_M33_LINK_ID: + env_lock_mutex(platform_lock); +/* Write directly into the Mailbox register, no need to wait until the content is cleared + (consumed by the receiver side) because the same value of the virtqueue ID is written + into this register when triggering the ISR for the receiver side. The whole queue of + received buffers for associated virtqueue is handled in the ISR then. */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM33_Core1, (1 << RL_GET_Q_ID(vector_id))); +#else + MAILBOX_SetValueBits(MAILBOX, kMAILBOX_CM33_Core0, (1 << RL_GET_Q_ID(vector_id))); +#endif + env_unlock_mutex(platform_lock); + return; + + default: + return; + } +#endif +} + +/** + * platform_time_delay + * + * @param num_msec Delay time in ms. + * + * This is not an accurate delay, it ensures at least num_msec passed when return. 
+ */ +void platform_time_delay(uint32_t num_msec) +{ + uint32_t loop; + + /* Recalculate the CPU frequency */ + SystemCoreClockUpdate(); + + /* Calculate the CPU loops to delay, each loop has 3 cycles */ + loop = SystemCoreClock / 3U / 1000U * num_msec; + + /* There's some difference among toolchains, 3 or 4 cycles each loop */ + while (loop > 0U) + { + __NOP(); + loop--; + } +} + +/** + * platform_in_isr + * + * Return whether CPU is processing IRQ + * + * @return True for IRQ, false otherwise. + * + */ +int32_t platform_in_isr(void) +{ + return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0); +} + +/** + * platform_interrupt_enable + * + * Enable peripheral-related interrupt + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. + * + */ +int32_t platform_interrupt_enable(uint32_t vector_id) +{ + RL_ASSERT(0 < disable_counter); + + platform_global_isr_disable(); + disable_counter--; + + if (disable_counter == 0) + { + NVIC_EnableIRQ(MAILBOX_IRQn); + } + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_interrupt_disable + * + * Disable peripheral-related interrupt. + * + * @param vector_id Virtual vector ID that needs to be converted to IRQ number + * + * @return vector_id Return value is never checked. 
+ * + */ +int32_t platform_interrupt_disable(uint32_t vector_id) +{ + RL_ASSERT(0 <= disable_counter); + + platform_global_isr_disable(); + /* virtqueues use the same NVIC vector + if counter is set - the interrupts are disabled */ + if (disable_counter == 0) + { + NVIC_DisableIRQ(MAILBOX_IRQn); + } + disable_counter++; + platform_global_isr_enable(); + return ((int32_t)vector_id); +} + +/** + * platform_map_mem_region + * + * Dummy implementation + * + */ +void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags) +{ +} + +/** + * platform_cache_all_flush_invalidate + * + * Dummy implementation + * + */ +void platform_cache_all_flush_invalidate(void) +{ +} + +/** + * platform_cache_disable + * + * Dummy implementation + * + */ +void platform_cache_disable(void) +{ +} + +/** + * platform_vatopa + * + * Dummy implementation + * + */ +uint32_t platform_vatopa(void *addr) +{ + return ((uint32_t)(char *)addr); +} + +/** + * platform_patova + * + * Dummy implementation + * + */ +void *platform_patova(uint32_t addr) +{ + return ((void *)(char *)addr); +} + +/** + * platform_init + * + * platform/environment init + */ +int32_t platform_init(void) +{ +#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1) + mcmgr_status_t retval = kStatus_MCMGR_Error; + retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0)); + if (kStatus_MCMGR_Success != retval) + { + return -1; + } +#else + MAILBOX_Init(MAILBOX); +#endif + + /* Create lock used in multi-instanced RPMsg */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (0 != env_create_mutex(&platform_lock, 1, &platform_lock_static_ctxt)) +#else + if (0 != env_create_mutex(&platform_lock, 1)) +#endif + { + return -1; + } + + return 0; +} + +/** + * platform_deinit + * + * platform/environment deinit process + */ +int32_t platform_deinit(void) +{ +/* Important for LPC5411x - do not deinit mailbox, if there + is a pending ISR 
on the other core! */ +#if defined(FSL_FEATURE_MAILBOX_SIDE_A) + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM33_Core1)) + { + } +#else + while (0U != MAILBOX_GetValue(MAILBOX, kMAILBOX_CM33_Core0)) + { + } +#endif + + MAILBOX_Deinit(MAILBOX); + + /* Delete lock used in multi-instanced RPMsg */ + env_delete_mutex(platform_lock); + platform_lock = ((void *)0); + return 0; +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c new file mode 100755 index 00000000..e0febb3d --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c @@ -0,0 +1,1420 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2022 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rpmsg_lite.h" +#include "rpmsg_platform.h" + +/* rpmsg_std_hdr contains a reserved field, + * this implementation of RPMSG uses this reserved + * field to hold the idx and totlen of the buffer + * not being returned to the vring in the receive + * callback function. This way, the no-copy API + * can use this field to return the buffer later. + */ +struct rpmsg_hdr_reserved +{ + uint16_t rfu; /* reserved for future usage */ + uint16_t idx; +}; + +RL_PACKED_BEGIN +/*! + * Common header for all rpmsg messages. + * Every message sent/received on the rpmsg bus begins with this header. + */ +struct rpmsg_std_hdr +{ + uint32_t src; /*!< source endpoint address */ + uint32_t dst; /*!< destination endpoint address */ + struct rpmsg_hdr_reserved reserved; /*!< reserved for future use */ + uint16_t len; /*!< length of payload (in bytes) */ + uint16_t flags; /*!< message flags */ +} RL_PACKED_END; + +RL_PACKED_BEGIN +/*! + * Common message structure. + * Contains the header and the payload. + */ +struct rpmsg_std_msg +{ + struct rpmsg_std_hdr hdr; /*!< RPMsg message header */ + uint8_t data[1]; /*!< bytes of message payload data */ +} RL_PACKED_END; + +/* Interface which is used to interact with the virtqueue layer, + * a different interface is used, when the local processor is the MASTER + * and when it is the REMOTE. 
+ */ +struct virtqueue_ops +{ + void (*vq_tx)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx); + void *(*vq_tx_alloc)(struct virtqueue *vq, uint32_t *len, uint16_t *idx); + void *(*vq_rx)(struct virtqueue *vq, uint32_t *len, uint16_t *idx); + void (*vq_rx_free)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx); +}; + +/* Zero-Copy extension macros */ +#define RPMSG_STD_MSG_FROM_BUF(buf) (struct rpmsg_std_msg *)(void *)((char *)(buf)-offsetof(struct rpmsg_std_msg, data)) + +#if !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)) +/* Check RL_BUFFER_COUNT and RL_BUFFER_SIZE only when RL_ALLOW_CUSTOM_SHMEM_CONFIG is not set to 1 */ +#if (!RL_BUFFER_COUNT) || (RL_BUFFER_COUNT & (RL_BUFFER_COUNT - 1)) +#error "RL_BUFFER_COUNT must be power of two (2, 4, ...)" +#endif + +/* Buffer is formed by payload and struct rpmsg_std_hdr */ +#define RL_BUFFER_SIZE (RL_BUFFER_PAYLOAD_SIZE + 16UL) + +#if (!RL_BUFFER_SIZE) || (RL_BUFFER_SIZE & (RL_BUFFER_SIZE - 1)) +#error \ + "RL_BUFFER_SIZE must be power of two (256, 512, ...)"\ + "RL_BUFFER_PAYLOAD_SIZE must be equal to (240, 496, 1008, ...) [2^n - 16]." +#endif +#endif /* !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)) */ + +/*! + * @brief + * Traverse the linked list of endpoints to get the one with defined address. 
+ * + * @param rpmsg_lite_dev RPMsg Lite instance + * @param addr Local endpoint address + * + * @return RL_NULL if not found, node pointer containing the ept on success + * + */ +static struct llist *rpmsg_lite_get_endpoint_from_addr(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t addr) +{ + struct llist *rl_ept_lut_head; + + rl_ept_lut_head = rpmsg_lite_dev->rl_endpoints; + while (rl_ept_lut_head != RL_NULL) + { + struct rpmsg_lite_endpoint *rl_ept = (struct rpmsg_lite_endpoint *)rl_ept_lut_head->data; + if (rl_ept->addr == addr) + { + return rl_ept_lut_head; + } + rl_ept_lut_head = rl_ept_lut_head->next; + } + return RL_NULL; +} + +/*************************************************************** + mmm mm m m mmmmm mm mmm m m mmmm + m" " ## # # # # ## m" " # m" #" " + # # # # # #mmmm" # # # #m# "#mmm + # #mm# # # # # #mm# # # #m "# + "mmm" # # #mmmmm #mmmmm #mmmm" # # "mmm" # "m "mmm#" +****************************************************************/ + +/*! + * @brief + * Called when remote side calls virtqueue_kick() + * at its transmit virtqueue. + * In this callback, the buffer is read-out + * of the rvq and user callback is called. 
+ * + * @param vq Virtqueue affected by the kick + * + */ +static void rpmsg_lite_rx_callback(struct virtqueue *vq) +{ + struct rpmsg_std_msg *rpmsg_msg; + uint32_t len; + uint16_t idx; + struct rpmsg_lite_endpoint *ept; + int32_t cb_ret; + struct llist *node; + struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv; +#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1) + uint32_t rx_freed = RL_FALSE; +#endif + + RL_ASSERT(rpmsg_lite_dev != RL_NULL); + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + env_lock_mutex(rpmsg_lite_dev->lock); +#endif + + /* Process the received data from remote node */ + rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx); + while (rpmsg_msg != RL_NULL) + { + node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rpmsg_msg->hdr.dst); + + cb_ret = RL_RELEASE; + if (node != RL_NULL) + { + ept = (struct rpmsg_lite_endpoint *)node->data; + cb_ret = ept->rx_cb(rpmsg_msg->data, rpmsg_msg->hdr.len, rpmsg_msg->hdr.src, ept->rx_cb_data); + } + + if (cb_ret == RL_HOLD) + { + rpmsg_msg->hdr.reserved.idx = idx; + } + else + { + rpmsg_lite_dev->vq_ops->vq_rx_free(rpmsg_lite_dev->rvq, rpmsg_msg, len, idx); +#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1) + rx_freed = RL_TRUE; +#endif + } + rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx); +#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1) + if ((rpmsg_msg == RL_NULL) && (rx_freed == RL_TRUE)) + { + /* Let the remote device know that some buffers have been freed */ + virtqueue_kick(rpmsg_lite_dev->rvq); + } +#endif + } +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + env_unlock_mutex(rpmsg_lite_dev->lock); +#endif +} + +/*! 
 * @brief
 * Called when remote side calls virtqueue_kick()
 * at its receive virtqueue.
 *
 * Marks the link as up (a kick proves the peer is alive) and forwards the
 * notification to the environment layer so blocked senders can retry.
 *
 * @param vq Virtqueue affected by the kick
 */
static void rpmsg_lite_tx_callback(struct virtqueue *vq)
{
    struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv;

    RL_ASSERT(rpmsg_lite_dev != RL_NULL);
    rpmsg_lite_dev->link_state = 1U;
    env_tx_callback(rpmsg_lite_dev->link_id);
}

/****************************************************************************
 * VQ HANDLERS
 * Virtqueue access routines used in case this processor has the REMOTE role.
 ****************************************************************************/
/*!
 * @brief
 * Places buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer (unused; the buffer is identified by idx)
 * @param len    Buffer length
 * @param idx    Buffer index
 */
static void vq_tx_remote(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
    status = virtqueue_add_consumed_buffer(tvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here */
}

/*!
 * @brief
 * Provides buffer to transmit messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to buffer.
 */
static void *vq_tx_alloc_remote(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(tvq, idx, len);
}

/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of received buffer
 * @param idx Index of buffer
 *
 * @return Pointer to received buffer
 */
static void *vq_rx_remote(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(rvq, idx, len);
}

/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer (scrubbed first when RL_CLEAR_USED_BUFFERS is enabled)
 * @param len    Size of received buffer
 * @param idx    Index of buffer
 */
static void vq_rx_free_remote(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_consumed_buffer(rvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */
    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here */
}

/****************************************************************************
 * VQ HANDLERS
 * Virtqueue access routines used in case this processor has the MASTER role.
 ****************************************************************************/

/*!
 * @brief
 * Places buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer (unused; the buffer is identified by idx)
 * @param len    Buffer length
 * @param idx    Buffer index
 */
static void vq_tx_master(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
    status = virtqueue_add_buffer(tvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here */
}

/*!
 * @brief
 * Provides buffer to transmit messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to buffer.
 */
static void *vq_tx_alloc_master(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(tvq, len, idx);
}

/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of received buffer
 * @param idx Index of buffer
 *
 * @return Pointer to received buffer
 */
static void *vq_rx_master(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(rvq, len, idx);
}

/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer (scrubbed first when RL_CLEAR_USED_BUFFERS is enabled)
 * @param len    Size of received buffer
 * @param idx    Index of buffer
 */
static void vq_rx_free_master(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_buffer(rvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
 * This condition is always met, so we don't need to return anything here */
}

/* Interface used in case this processor is MASTER */
static const struct virtqueue_ops master_vq_ops = {
    vq_tx_master,
    vq_tx_alloc_master,
    vq_rx_master,
    vq_rx_free_master,
};

/* Interface used in case this processor is REMOTE */
static const struct virtqueue_ops remote_vq_ops = {
    vq_tx_remote,
    vq_tx_alloc_remote,
    vq_rx_remote,
    vq_rx_free_remote,
};

/* Helper function for virtqueue notification - rings the doorbell of the
 * counterpart core through the platform/environment layer. */
static void virtqueue_notify(struct virtqueue *vq)
{
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    struct rpmsg_lite_instance *inst = vq->priv;
    platform_notify(inst->env ? env_get_platform_context(inst->env) : RL_NULL, vq->vq_queue_index);
#else
    platform_notify(vq->vq_queue_index);
#endif
}

/*************************************************
 * EPT NEW - endpoint creation
 *************************************************/

/*!
 * @brief
 * Creates a new endpoint on the given rpmsg_lite instance and registers it
 * in the instance endpoint list.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param addr           Requested endpoint address; RL_ADDR_ANY picks the lowest free one
 * @param rx_cb          Receive callback for messages addressed to this endpoint
 * @param rx_cb_data     User argument passed to rx_cb
 * @param ept_context    Caller-provided endpoint storage (static API build only)
 *
 * @return Pointer to the new endpoint, RL_NULL on bad parameters, when the
 *         address is already taken, or when no memory is available
 */
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data,
                                                  struct rpmsg_lite_ept_static_context *ept_context)
#else
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data)
#endif
{
    struct rpmsg_lite_endpoint *rl_ept;
    struct llist *node;
    uint32_t i;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    {
        if (addr == RL_ADDR_ANY)
        {
            /* find lowest free address */
            for (i = 1; i < 0xFFFFFFFFU; i++)
            {
                if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, i) == RL_NULL)
                {
                    addr = i;
                    break;
                }
            }
            if (addr == RL_ADDR_ANY)
            {
                /* no address is free, cannot happen normally */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }
        else
        {
            if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, addr) != RL_NULL)
            {
                /* Already exists! */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        if (ept_context == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }

        rl_ept = &(ept_context->ept);
        node   = &(ept_context->node);
#else
        rl_ept = env_allocate_memory(sizeof(struct rpmsg_lite_endpoint));
        if (rl_ept == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
        node = env_allocate_memory(sizeof(struct llist));
        if (node == RL_NULL)
        {
            env_free_memory(rl_ept);
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
#endif /* RL_USE_STATIC_API */

        env_memset(rl_ept, 0x00, sizeof(struct rpmsg_lite_endpoint));

        rl_ept->addr       = addr;
        rl_ept->rx_cb      = rx_cb;
        rl_ept->rx_cb_data = rx_cb_data;

        node->data = rl_ept;

        add_to_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
    }
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return rl_ept;
}
/*************************************************
 * EPT DEL - endpoint deletion
 *************************************************/

/*!
 * @brief
 * Unregisters the endpoint from the instance endpoint list and releases its
 * memory (dynamic API build only).
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param rl_ept         Endpoint to destroy
 *
 * @return RL_SUCCESS on success, RL_ERR_PARAM on bad parameters or when the
 *         endpoint is not registered on this instance
 */
int32_t rpmsg_lite_destroy_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, struct rpmsg_lite_endpoint *rl_ept)
{
    struct llist *node;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (rl_ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rl_ept->addr);
    if (node != RL_NULL)
    {
        remove_from_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
        env_unlock_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(node);
        env_free_memory(rl_ept);
#endif
        return RL_SUCCESS;
    }
    else
    {
        env_unlock_mutex(rpmsg_lite_dev->lock);
        return RL_ERR_PARAM;
    }
}

/******************************************
 * TX API
 ******************************************/

/*!
 * @brief
 * Reports whether the peer has already kicked the link.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance (RL_NULL reports link down)
 *
 * @return RL_TRUE when the link is up, RL_FALSE (0) otherwise
 */
uint32_t rpmsg_lite_is_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return 0U;
    }

    return (RL_TRUE == rpmsg_lite_dev->link_state ? RL_TRUE : RL_FALSE);
}

/*!
 * @brief
 * Blocks (via the environment layer) until the link goes up.
 * A RL_NULL instance is silently ignored.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 */
void rpmsg_lite_wait_for_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return;
    }

    env_wait_for_link_up(&rpmsg_lite_dev->link_state, rpmsg_lite_dev->link_id);
}

/*!
 * @brief
 * Internal function to format a RPMsg compatible
 * message and sends it
 *
 * Allocates a TX buffer (polling in RL_MS_PER_INTERVAL steps until timeout),
 * fills in the RPMsg header, copies the payload and kicks the peer.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param src Local endpoint address
 * @param dst Remote endpoint address
 * @param data Payload buffer
 * @param size Size of payload, in bytes
 * @param flags Value of flags field
 * @param timeout Timeout in ms, 0 if nonblocking
 *
 * @return Status of function execution, RL_SUCCESS on success
 */
static int32_t rpmsg_lite_format_message(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                         uint32_t src,
                                         uint32_t dst,
                                         char *data,
                                         uint32_t size,
                                         int32_t flags,
                                         uint32_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;
    uint32_t buff_len;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (data == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get rpmsg buffer for sending message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        return RL_ERR_NO_MEM;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            return RL_ERR_NO_MEM;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* Initialize RPMSG header. */
    rpmsg_msg->hdr.dst   = dst;
    rpmsg_msg->hdr.src   = src;
    rpmsg_msg->hdr.len   = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)flags;
    /* Copy data to rpmsg buffer. */
    env_memcpy(rpmsg_msg->data, data, size);

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue buffer on virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(rpmsg_lite_dev->tvq, buffer, buff_len, idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}

int32_t rpmsg_lite_send(struct rpmsg_lite_instance *rpmsg_lite_dev,
                        struct rpmsg_lite_endpoint *ept,
                        uint32_t dst,
                        char *data,
                        uint32_t size,
                        uint32_t timeout)
{
    if (ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    /* FIXME: alternatively, copy only up to the buffer length and send that
     * instead of rejecting oversized payloads outright. */
    /* NOTE(review): rpmsg_lite_dev may be dereferenced below by
     * RL_BUFFER_PAYLOAD_SIZE(rpmsg_lite_dev->link_id) before
     * rpmsg_lite_format_message() validates it - confirm callers never pass
     * RL_NULL in the custom-shmem configuration. */
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE(rpmsg_lite_dev->link_id))
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    return rpmsg_lite_format_message(rpmsg_lite_dev, ept->addr, dst, data, size, RL_NO_FLAGS, timeout);
}

#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1)

/*!
 * @brief
 * Reserves a TX buffer and returns a pointer to its payload area so the
 * application can fill it in place (zero-copy send path).
 *
 * The buffer index is stashed in the header's reserved field for the later
 * rpmsg_lite_send_nocopy() call.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 *                       (NOTE(review): dereferenced without a RL_NULL check -
 *                       confirm callers guarantee a valid instance)
 * @param size           Out: maximum payload size of the returned buffer, 0 on failure
 * @param timeout        RL_FALSE for nonblocking, otherwise timeout in ms
 *                       (polled in RL_MS_PER_INTERVAL steps)
 *
 * @return Pointer to the payload area, RL_NULL when no buffer became available
 */
void *rpmsg_lite_alloc_tx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t *size, uint32_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;

    if (size == RL_NULL)
    {
        return RL_NULL;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        *size = 0;
        return RL_NULL;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get rpmsg buffer for sending message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        *size = 0;
        return RL_NULL;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            *size = 0;
            return RL_NULL;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* keep idx and totlen information for nocopy tx function */
    rpmsg_msg->hdr.reserved.idx = idx;

    /* return the maximum payload size */
    *size -= sizeof(struct rpmsg_std_hdr);

    return rpmsg_msg->data;
}

/*!
 * @brief
 * Sends a buffer previously obtained from rpmsg_lite_alloc_tx_buffer()
 * without copying the payload; the header is recovered from the payload
 * pointer via RPMSG_STD_MSG_FROM_BUF().
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param ept            Sending endpoint (provides the source address)
 * @param dst            Remote endpoint address
 * @param data           Payload pointer returned by rpmsg_lite_alloc_tx_buffer()
 * @param size           Payload size in bytes
 *
 * @return RL_SUCCESS, RL_ERR_PARAM, RL_ERR_BUFF_SIZE or RL_NOT_READY
 */
int32_t rpmsg_lite_send_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
                               struct rpmsg_lite_endpoint *ept,
                               uint32_t dst,
                               void *data,
                               uint32_t size)
{
    struct rpmsg_std_msg *rpmsg_msg;
    uint32_t src;

    if ((ept == RL_NULL) || (data == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE(rpmsg_lite_dev->link_id))
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    src = ept->addr;

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Debug aid: verify that 'data' really lies inside this role's half of the
     * shared-memory buffer pool. */
    RL_ASSERT(
        /* master check */
        ((rpmsg_lite_dev->vq_ops == &master_vq_ops) &&
         (data >= (void *)(rpmsg_lite_dev->sh_mem_base +
                           (rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len))) &&
         (data <= (void *)(rpmsg_lite_dev->sh_mem_base +
                           (2 * rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len)))) ||

        /* remote check */
        ((rpmsg_lite_dev->vq_ops == &remote_vq_ops) && (data >= (void *)rpmsg_lite_dev->sh_mem_base) &&
         (data <= (void *)(rpmsg_lite_dev->sh_mem_base +
                           (rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len)))))
#endif

    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(data);

    /* Initialize RPMSG header. */
    rpmsg_msg->hdr.dst   = dst;
    rpmsg_msg->hdr.src   = src;
    rpmsg_msg->hdr.len   = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)RL_NO_FLAGS;

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue buffer on virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(
        rpmsg_lite_dev->tvq, (void *)rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->tvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}

/******************************************
 * RX API
 ******************************************/

/*!
 * @brief
 * Returns a received (zero-copy) buffer back to the virtqueue and, when
 * RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION is enabled, kicks the peer so it
 * learns a buffer was freed.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param rxbuf          Payload pointer previously handed out by the RX path
 *
 * @return RL_SUCCESS on success, RL_ERR_PARAM on bad parameters
 */
int32_t rpmsg_lite_release_rx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf)
{
    struct rpmsg_std_msg *rpmsg_msg;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (rxbuf == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Debug aid: verify that 'rxbuf' lies inside this role's RX half of the
     * shared-memory buffer pool. */
    RL_ASSERT(
        /* master check */
        ((rpmsg_lite_dev->vq_ops == &master_vq_ops) && (rxbuf >= (void *)rpmsg_lite_dev->sh_mem_base) &&
         (rxbuf <= (void *)(rpmsg_lite_dev->sh_mem_base +
                            (rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len)))) ||

        /* remote check */
        ((rpmsg_lite_dev->vq_ops == &remote_vq_ops) &&
         (rxbuf >= (void *)(rpmsg_lite_dev->sh_mem_base +
                            (rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len))) &&
         (rxbuf <= (void *)(rpmsg_lite_dev->sh_mem_base +
                            (2 * rpmsg_lite_dev->rvq->vq_nentries * rpmsg_lite_dev->rvq->vq_ring.desc->len)))))
#endif

    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(rxbuf);

    env_lock_mutex(rpmsg_lite_dev->lock);

    /* Return used buffer, with total length (header length + buffer size). */
    rpmsg_lite_dev->vq_ops->vq_rx_free(
        rpmsg_lite_dev->rvq, rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->rvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);

#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
    /* Let the remote device know that a buffer has been freed */
    virtqueue_kick(rpmsg_lite_dev->rvq);
#endif

    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}

#endif /* RL_API_HAS_ZEROCOPY */

/******************************
 * INIT API
 ******************************/

/*!
 * @brief
 * Initializes an RPMsg Lite instance in the MASTER role: carves the vrings
 * and the buffer pool out of the given shared memory, creates both
 * virtqueues, pre-fills them with buffers, installs the ISRs and kicks the
 * remote so it knows the master is ready.
 *
 * @return Pointer to the ready instance, RL_NULL on invalid parameters,
 *         insufficient shared memory, or resource-allocation failure
 */
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(
    void *shmem_addr, size_t shmem_length, uint32_t link_id, uint32_t init_flags, void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue * vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2];
    void *buffer;
    uint32_t idx, j;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;

    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }

    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if \
defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }

    /* shmem_config.buffer_count must be power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }

    /* buffer size must be power of two (256, 512, ...) */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }

    if ((2U * (uint32_t)shmem_config.buffer_count) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
         (uint32_t)(shmem_config.buffer_payload_size + 16UL)))
    {
        return RL_NULL;
    }
#else
    if ((2U * (uint32_t)RL_BUFFER_COUNT) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE))
    {
        return RL_NULL;
    }

#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    rpmsg_lite_dev->link_id = link_id;

    /*
     * Since this device is the RPMSG master, it needs to manage the
     * shared buffers. Create shared memory pool to handle buffers.
     */
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uint32_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
    rpmsg_lite_dev->sh_mem_remaining = (RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
                                       (uint32_t)(shmem_config.buffer_payload_size + 16UL);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uint32_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
    rpmsg_lite_dev->sh_mem_remaining =
        (RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    rpmsg_lite_dev->sh_mem_total = rpmsg_lite_dev->sh_mem_remaining;

    /* Initialize names and callbacks */
    vq_names[0] = "rx_vq";
    vq_names[1] = "tx_vq";
    callback[0] = rpmsg_lite_rx_callback;
    callback[1] = rpmsg_lite_tx_callback;
    rpmsg_lite_dev->vq_ops = &master_vq_ops;

    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr =
            (void *)(char *)((uint32_t)(char *)shmem_addr + (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align     = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr =
            (void *)(char *)((uint32_t)(char *)shmem_addr + (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align     = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

        env_memset((void *)ring_info.phy_addr, 0x00, (uint32_t)vring_size(ring_info.num_descs, ring_info.align));

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */

        if (status == RL_SUCCESS)
        {
            /* Initialize vring control block in virtqueue. */
            vq_ring_init(vqs[idx]);

            /* Disable callbacks - will be enabled by the application
             * once initialization is completed.
             */
            virtqueue_disable_cb(vqs[idx]);
        }
        else
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    /* FIXME - a better way to handle this: tx for master is rx for remote and vice versa. */
    rpmsg_lite_dev->tvq = vqs[1];
    rpmsg_lite_dev->rvq = vqs[0];

    for (j = 0U; j < 2U; j++)
    {
        for (idx = 0U; ((idx < vqs[j]->vq_nentries) && (idx < rpmsg_lite_dev->sh_mem_total)); idx++)
        {
            /* Carve the next buffer out of the shared-memory pool (post-decrement
             * consumes one pool slot per iteration). */
            buffer = (rpmsg_lite_dev->sh_mem_remaining > 0U) ?
                         (rpmsg_lite_dev->sh_mem_base +
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                          (uint32_t)(shmem_config.buffer_payload_size + 16UL) *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#else
                          (uint32_t)RL_BUFFER_SIZE *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
                         (RL_NULL);

            RL_ASSERT(buffer != RL_NULL);

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
            env_memset(buffer, 0x00, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
            env_memset(buffer, 0x00, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            if (vqs[j] == rpmsg_lite_dev->rvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else if (vqs[j] == rpmsg_lite_dev->tvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else
            {
                /* should not happen */
            }

            if (status != RL_SUCCESS)
            {
                /* Clean up! */
                env_delete_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
                env_free_memory(rpmsg_lite_dev);
#endif
                return RL_NULL;
            }
        }
    }

    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    /*
     * Let the remote device know that Master is ready for
     * communication.
     */
    virtqueue_kick(rpmsg_lite_dev->rvq);

    return rpmsg_lite_dev;
}

/*!
 * @brief
 * Initializes an RPMsg Lite instance in the REMOTE role. The vrings are laid
 * out by the master; link_state starts at 0 and is set once the first kick
 * from the master arrives (see rpmsg_lite_tx_callback).
 */
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, uint32_t link_id, uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue * vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2];
    uint32_t idx;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;
    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }

    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }

    /* shmem_config.buffer_count must be power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }

    /* buffer size must be power of two (256, 512, ...)
     */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif

    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    rpmsg_lite_dev->link_id = link_id;

    vq_names[0] = "tx_vq"; /* swapped in case of remote */
    vq_names[1] = "rx_vq";
    callback[0] = rpmsg_lite_tx_callback;
    callback[1] = rpmsg_lite_rx_callback;
    rpmsg_lite_dev->vq_ops = &remote_vq_ops;
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uint32_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uint32_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
#endif /* defined(RL_ALLOW_CUSTOM_VRING_CONFIG) && (RL_ALLOW_CUSTOM_VRING_CONFIG == 1) */

    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr =
            (void *)(char *)((uint32_t)(char *)shmem_addr + (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align     = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr =
            (void *)(char *)((uint32_t)(char *)shmem_addr + (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align     = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_VRING_CONFIG) && (RL_ALLOW_CUSTOM_VRING_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */

        if (status != RL_SUCCESS)
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    /* FIXME - a better way to handle this: tx for master is rx for remote and vice versa. */
    rpmsg_lite_dev->tvq = vqs[0];
    rpmsg_lite_dev->rvq = vqs[1];

    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    return rpmsg_lite_dev;
}

/*******************************************
 * DEINIT API
 *******************************************/

/*!
 * @brief
 * Tears down an RPMsg Lite instance: takes the link down, disables and
 * removes the virtqueue interrupts, frees both virtqueues and the mutex,
 * and (dynamic API build) releases the instance memory itself.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance to deinitialize
 *
 * @return RL_SUCCESS on success, RL_ERR_PARAM on an invalid or
 *         uninitialized instance
 */
int32_t rpmsg_lite_deinit(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (!((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) && (rpmsg_lite_dev->lock != RL_NULL)))
    {
        /* ERROR - trying to deinitialize an uninitialized RPMSG instance */
        RL_ASSERT((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) &&
                  (rpmsg_lite_dev->lock != RL_NULL));
        return RL_ERR_PARAM;
    }
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);

    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);

    (void)platform_deinit_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    (void)platform_deinit_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    virtqueue_free_static(rpmsg_lite_dev->rvq);
    virtqueue_free_static(rpmsg_lite_dev->tvq);
#else
    virtqueue_free(rpmsg_lite_dev->rvq);
    virtqueue_free(rpmsg_lite_dev->tvq);
#endif /* RL_USE_STATIC_API */

    env_delete_mutex(rpmsg_lite_dev->lock);
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    (void)env_deinit(rpmsg_lite_dev->env);
#else
    (void)env_deinit();
#endif

#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
    env_free_memory(rpmsg_lite_dev);
#endif /* RL_USE_STATIC_API */

    return RL_SUCCESS;
}
diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c
new file mode 100755
index 00000000..143919aa
---
/dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rpmsg_lite.h" +#include "rpmsg_ns.h" +#include + +#define RL_NS_NAME_SIZE (32) + +/*! 
+ * struct rpmsg_ns_msg - dynamic name service announcement message + * @name: name of remote service that is published + * @addr: address of remote service that is published + * @flags: indicates whether service is created or destroyed + * + * This message is sent across to publish a new service, or announce + * about its removal. When we receive these messages, an appropriate + * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe() + * or ->remove() handler of the appropriate rpmsg driver will be invoked + * (if/as-soon-as one is registered). + */ +RL_PACKED_BEGIN +struct rpmsg_ns_msg +{ + char name[RL_NS_NAME_SIZE]; + uint32_t addr; + uint32_t flags; +} RL_PACKED_END; + +/*! + * @brief + * Nameservice callback, called in interrupt context + * + * @param payload Pointer to the buffer containing received data + * @param payload_len Size of data received, in bytes + * @param src Pointer to address of the endpoint from which data is received + * @param priv Private data provided during endpoint creation + * + * @return RL_RELEASE, message is always freed + * + */ +static int32_t rpmsg_ns_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv) +{ + struct rpmsg_ns_msg *ns_msg_ptr = payload; + struct rpmsg_ns_callback_data *cb_ctxt = priv; + RL_ASSERT(priv != RL_NULL); + RL_ASSERT(cb_ctxt->cb != RL_NULL); + + /* Drop likely bad messages received at nameservice address */ + if (payload_len == sizeof(struct rpmsg_ns_msg)) + { + cb_ctxt->cb(ns_msg_ptr->addr, ns_msg_ptr->name, ns_msg_ptr->flags, cb_ctxt->user_data); + } + + return RL_RELEASE; +} + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, + rpmsg_ns_new_ept_cb app_cb, + void *user_data, + rpmsg_ns_static_context *ns_ept_ctxt) +#else +rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_new_ept_cb app_cb, void *user_data) +#endif /* RL_USE_STATIC_API */ +{ + struct 
rpmsg_ns_context *ns_ctxt; + + if (app_cb == RL_NULL) + { + return RL_NULL; + } + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + if (ns_ept_ctxt == RL_NULL) + { + return RL_NULL; + } + + ns_ctxt = &ns_ept_ctxt->ns_ctxt; + + /* Set-up the nameservice callback context */ + ns_ept_ctxt->cb_ctxt.user_data = user_data; + ns_ept_ctxt->cb_ctxt.cb = app_cb; + + ns_ctxt->cb_ctxt = &ns_ept_ctxt->cb_ctxt; + + ns_ctxt->ept = rpmsg_lite_create_ept(rpmsg_lite_dev, RL_NS_EPT_ADDR, rpmsg_ns_rx_cb, (void *)ns_ctxt->cb_ctxt, + &ns_ept_ctxt->ept_ctxt); +#else + { + struct rpmsg_ns_callback_data *cb_ctxt; + + cb_ctxt = env_allocate_memory(sizeof(struct rpmsg_ns_callback_data)); + if (cb_ctxt == RL_NULL) + { + return RL_NULL; + } + ns_ctxt = env_allocate_memory(sizeof(struct rpmsg_ns_context)); + if (ns_ctxt == RL_NULL) + { + env_free_memory(cb_ctxt); + return RL_NULL; + } + + /* Set-up the nameservice callback context */ + cb_ctxt->user_data = user_data; + cb_ctxt->cb = app_cb; + + ns_ctxt->cb_ctxt = cb_ctxt; + + ns_ctxt->ept = rpmsg_lite_create_ept(rpmsg_lite_dev, RL_NS_EPT_ADDR, rpmsg_ns_rx_cb, (void *)ns_ctxt->cb_ctxt); + } +#endif /* RL_USE_STATIC_API */ + + return (rpmsg_ns_handle)ns_ctxt; +} + +int32_t rpmsg_ns_unbind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_handle handle) +{ + struct rpmsg_ns_context *ns_ctxt = (struct rpmsg_ns_context *)handle; + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + return rpmsg_lite_destroy_ept(rpmsg_lite_dev, ns_ctxt->ept); +#else + { + int32_t retval; + + retval = rpmsg_lite_destroy_ept(rpmsg_lite_dev, ns_ctxt->ept); + env_free_memory(ns_ctxt->cb_ctxt); + env_free_memory(ns_ctxt); + return retval; + } +#endif +} + +int32_t rpmsg_ns_announce(struct rpmsg_lite_instance *rpmsg_lite_dev, + struct rpmsg_lite_endpoint *new_ept, + const char *ept_name, + uint32_t flags) +{ + struct rpmsg_ns_msg ns_msg; + + if (ept_name == RL_NULL) + { + return RL_ERR_PARAM; + } + + if (new_ept == RL_NULL) + { + return 
RL_ERR_PARAM; + } + + env_strncpy(ns_msg.name, ept_name, RL_NS_NAME_SIZE); + ns_msg.flags = flags; + ns_msg.addr = new_ept->addr; + + return rpmsg_lite_send(rpmsg_lite_dev, new_ept, RL_NS_EPT_ADDR, (char *)&ns_msg, sizeof(struct rpmsg_ns_msg), + RL_BLOCK); +} diff --git a/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_queue.c b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_queue.c new file mode 100755 index 00000000..0fdaf1d0 --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_queue.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2014, Mentor Graphics Corporation + * Copyright (c) 2015 Xilinx, Inc. + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2021 NXP + * Copyright 2021 ACRIOS Systems s.r.o. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "rpmsg_lite.h" +#include "rpmsg_queue.h" + +int32_t rpmsg_queue_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv) +{ + rpmsg_queue_rx_cb_data_t msg; + + RL_ASSERT(priv != RL_NULL); + + msg.data = payload; + msg.len = payload_len; + msg.src = src; + + /* if message is successfully added into queue then hold rpmsg buffer */ + if (0 != env_put_queue(priv, &msg, 0)) + { + /* hold the rx buffer */ + return RL_HOLD; + } + + return RL_RELEASE; +} + +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) +rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev, + uint8_t *queue_storage, + rpmsg_static_queue_ctxt *queue_ctxt) +#else +rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev) +#endif +{ + int32_t status; + void *q = RL_NULL; + + if (rpmsg_lite_dev == RL_NULL) + { + return RL_NULL; + } + + /* create message queue for channel default endpoint */ +#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1) + status = env_create_queue(&q, (int32_t)rpmsg_lite_dev->rvq->vq_nentries, (int32_t)sizeof(rpmsg_queue_rx_cb_data_t), + queue_storage, queue_ctxt); +#else + status = env_create_queue(&q, (int32_t)rpmsg_lite_dev->rvq->vq_nentries, (int32_t)sizeof(rpmsg_queue_rx_cb_data_t)); +#endif + if ((status != 0) || (q == RL_NULL)) + { + return RL_NULL; + } + + return ((rpmsg_queue_handle)q); +} + +int32_t rpmsg_queue_destroy(struct rpmsg_lite_instance 
*rpmsg_lite_dev, rpmsg_queue_handle q) +{ + if (rpmsg_lite_dev == RL_NULL) + { + return RL_ERR_PARAM; + } + + if (q == RL_NULL) + { + return RL_ERR_PARAM; + } + env_delete_queue((void *)q); + return RL_SUCCESS; +} + +int32_t rpmsg_queue_recv(struct rpmsg_lite_instance *rpmsg_lite_dev, + rpmsg_queue_handle q, + uint32_t *src, + char *data, + uint32_t maxlen, + uint32_t *len, + uint32_t timeout) +{ + rpmsg_queue_rx_cb_data_t msg = {0}; + int32_t retval = RL_SUCCESS; + + if (rpmsg_lite_dev == RL_NULL) + { + return RL_ERR_PARAM; + } + if (q == RL_NULL) + { + return RL_ERR_PARAM; + } + if (data == RL_NULL) + { + return RL_ERR_PARAM; + } + + /* Get an element out of the message queue for the selected endpoint */ + if (0 != env_get_queue((void *)q, &msg, timeout)) + { + if (src != RL_NULL) + { + *src = msg.src; + } + if (len != RL_NULL) + { + *len = msg.len; + } + + if (maxlen >= msg.len) + { + env_memcpy(data, msg.data, msg.len); + } + else + { + retval = RL_ERR_BUFF_SIZE; + } + + /* Release used buffer. */ + return ((RL_SUCCESS == rpmsg_lite_release_rx_buffer(rpmsg_lite_dev, msg.data)) ? 
retval : RL_ERR_PARAM); + } + else + { + return RL_ERR_NO_BUFF; /* failed */ + } +} + +int32_t rpmsg_queue_recv_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev, + rpmsg_queue_handle q, + uint32_t *src, + char **data, + uint32_t *len, + uint32_t timeout) +{ + rpmsg_queue_rx_cb_data_t msg = {0}; + + if (rpmsg_lite_dev == RL_NULL) + { + return RL_ERR_PARAM; + } + if (data == RL_NULL) + { + return RL_ERR_PARAM; + } + if (q == RL_NULL) + { + return RL_ERR_PARAM; + } + + /* Get an element out of the message queue for the selected endpoint */ + if (0 != env_get_queue((void *)q, &msg, timeout)) + { + if (src != RL_NULL) + { + *src = msg.src; + } + if (len != RL_NULL) + { + *len = msg.len; + } + + *data = msg.data; + + return RL_SUCCESS; /* success */ + } + + return RL_ERR_NO_BUFF; /* failed */ +} + +int32_t rpmsg_queue_nocopy_free(struct rpmsg_lite_instance *rpmsg_lite_dev, void *data) +{ + if (rpmsg_lite_dev == RL_NULL) + { + return RL_ERR_PARAM; + } + if (data == RL_NULL) + { + return RL_ERR_PARAM; + } + + /* Release used buffer. */ + return ((RL_SUCCESS == rpmsg_lite_release_rx_buffer(rpmsg_lite_dev, data)) ? RL_SUCCESS : RL_ERR_PARAM); +} + +int32_t rpmsg_queue_get_current_size(rpmsg_queue_handle q) +{ + if (q == RL_NULL) + { + return RL_ERR_PARAM; + } + + /* Return actual queue size. */ + return env_get_current_queue_size((void *)q); +} diff --git a/Middlewares/Third_Party/rpmsg/virtio/virtqueue.c b/Middlewares/Third_Party/rpmsg/virtio/virtqueue.c new file mode 100755 index 00000000..52fe00bc --- /dev/null +++ b/Middlewares/Third_Party/rpmsg/virtio/virtqueue.c @@ -0,0 +1,735 @@ +/*- + * Copyright (c) 2011, Bryan Venteicher + * Copyright (c) 2016 Freescale Semiconductor, Inc. + * Copyright 2016-2019 NXP + * All rights reserved. + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rpmsg_env.h" +#include "virtqueue.h" + +/* Prototype for internal functions. */ +static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx); +static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len); +static uint16_t vq_ring_add_buffer( + struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length); +static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc); +static int32_t vq_ring_must_notify_host(struct virtqueue *vq); +static void vq_ring_notify_host(struct virtqueue *vq); +static uint16_t virtqueue_nused(struct virtqueue *vq); + +/*! 
+ * virtqueue_create - Creates new VirtIO queue + * + * @param id - VirtIO queue ID , must be unique + * @param name - Name of VirtIO queue + * @param ring - Pointer to vring_alloc_info control block + * @param callback - Pointer to callback function, invoked + * when message is available on VirtIO queue + * @param notify - Pointer to notify function, used to notify + * other side that there is job available for it + * @param v_queue - Created VirtIO queue. + * + * @return - Function status + */ +int32_t virtqueue_create(uint16_t id, + const char *name, + struct vring_alloc_info *ring, + void (*callback_fc)(struct virtqueue *vq), + void (*notify_fc)(struct virtqueue *vq), + struct virtqueue **v_queue) +{ + struct virtqueue *vq = VQ_NULL; + volatile int32_t status = VQUEUE_SUCCESS; + uint32_t vq_size = 0U; + + VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM); + VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM); + VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN); + + if (status == VQUEUE_SUCCESS) + { + vq_size = sizeof(struct virtqueue); + vq = (struct virtqueue *)env_allocate_memory(vq_size); + + if (vq == VQ_NULL) + { + return (ERROR_NO_MEM); + } + + env_memset(vq, 0x00, vq_size); + + env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ); + vq->vq_queue_index = id; + vq->vq_alignment = (int32_t)(ring->align); + vq->vq_nentries = ring->num_descs; + vq->callback_fc = callback_fc; + vq->notify_fc = notify_fc; + + // indirect addition is not supported + vq->vq_ring_size = vring_size(ring->num_descs, ring->align); + vq->vq_ring_mem = (void *)ring->phy_addr; + + vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment); + + *v_queue = vq; + } + + return (status); +} + +/*! 
+ * virtqueue_create_static - Creates new VirtIO queue - static version + * + * @param id - VirtIO queue ID , must be unique + * @param name - Name of VirtIO queue + * @param ring - Pointer to vring_alloc_info control block + * @param callback - Pointer to callback function, invoked + * when message is available on VirtIO queue + * @param notify - Pointer to notify function, used to notify + * other side that there is job available for it + * @param v_queue - Created VirtIO queue. + * @param vq_ctxt - Statically allocated virtqueue context + * + * @return - Function status + */ +int32_t virtqueue_create_static(uint16_t id, + const char *name, + struct vring_alloc_info *ring, + void (*callback_fc)(struct virtqueue *vq), + void (*notify_fc)(struct virtqueue *vq), + struct virtqueue **v_queue, + struct vq_static_context *vq_ctxt) +{ + struct virtqueue *vq = VQ_NULL; + volatile int32_t status = VQUEUE_SUCCESS; + uint32_t vq_size = 0U; + + VQ_PARAM_CHK(vq_ctxt == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM); + VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM); + VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM); + VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN); + + if (status == VQUEUE_SUCCESS) + { + vq_size = sizeof(struct virtqueue); + vq = &vq_ctxt->vq; + + env_memset(vq, 0x00, vq_size); + + env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ); + vq->vq_queue_index = id; + vq->vq_alignment = (int32_t)(ring->align); + vq->vq_nentries = ring->num_descs; + vq->callback_fc = callback_fc; + vq->notify_fc = notify_fc; + + // indirect addition is not supported + vq->vq_ring_size = vring_size(ring->num_descs, ring->align); + vq->vq_ring_mem = (void *)ring->phy_addr; + + vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment); + + *v_queue = vq; + } + + return (status); +} + +/*! + * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption + * by other side. 
+ * + * @param vq - Pointer to VirtIO queue control block. + * @param head_idx - Index of buffer to be added to the avail ring + * + * @return - Function status + */ +int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx) +{ + volatile int32_t status = VQUEUE_SUCCESS; + + VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM); + + VQUEUE_BUSY(vq, avail_write); + + if (status == VQUEUE_SUCCESS) + { + VQ_RING_ASSERT_VALID_IDX(vq, head_idx); + + /* + * Update vring_avail control block fields so that other + * side can get buffer using it. + */ + vq_ring_update_avail(vq, head_idx); + } + + VQUEUE_IDLE(vq, avail_write); + + return (status); +} + +/*! + * virtqueue_fill_avail_buffers - Enqueues single buffer in vring, updates avail + * + * @param vq - Pointer to VirtIO queue control block + * @param buffer - Address of buffer + * @param len - Length of buffer + * + * @return - Function status + */ +int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len) +{ + struct vring_desc *dp; + uint16_t head_idx; + + volatile int32_t status = VQUEUE_SUCCESS; + + VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM); + + VQUEUE_BUSY(vq, avail_write); + + if (status == VQUEUE_SUCCESS) + { + head_idx = vq->vq_desc_head_idx; + + dp = &vq->vq_ring.desc[head_idx]; +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + dp->addr = env_map_vatopa(vq->env, buffer); +#else + dp->addr = env_map_vatopa(buffer); +#endif + dp->len = len; + dp->flags = VRING_DESC_F_WRITE; + + vq->vq_desc_head_idx++; + + vq_ring_update_avail(vq, head_idx); + } + + VQUEUE_IDLE(vq, avail_write); + + return (status); +} + +/*! 
+ * virtqueue_get_buffer - Returns used buffers from VirtIO queue + * + * @param vq - Pointer to VirtIO queue control block + * @param len - Length of consumed buffer + * @param idx - Index to buffer descriptor pool + * + * @return - Pointer to used buffer + */ +void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx) +{ + struct vring_used_elem *uep; + uint16_t used_idx, desc_idx; + + if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx)) + { + return (VQ_NULL); + } + VQUEUE_BUSY(vq, used_read); + + used_idx = (uint16_t)(vq->vq_used_cons_idx & ((uint16_t)(vq->vq_nentries - 1U))); + uep = &vq->vq_ring.used->ring[used_idx]; + + env_rmb(); + + desc_idx = (uint16_t)uep->id; + if (len != VQ_NULL) + { + *len = uep->len; + } + + if (idx != VQ_NULL) + { + *idx = desc_idx; + } + + vq->vq_used_cons_idx++; + + VQUEUE_IDLE(vq, used_read); + +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + return env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[desc_idx].addr))); +#else + return env_map_patova((uint32_t)(vq->vq_ring.desc[desc_idx].addr)); +#endif +} + +/*! + * virtqueue_get_buffer_length - Returns size of a buffer + * + * @param vq - Pointer to VirtIO queue control block + * @param idx - Index to buffer descriptor pool + * + * @return - Buffer length + */ +uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx) +{ + return vq->vq_ring.desc[idx].len; +} + +/*! + * virtqueue_free - Frees VirtIO queue resources + * + * @param vq - Pointer to VirtIO queue control block + * + */ +void virtqueue_free(struct virtqueue *vq) +{ + if (vq != VQ_NULL) + { + if (vq->vq_ring_mem != VQ_NULL) + { + vq->vq_ring_size = 0; + vq->vq_ring_mem = VQ_NULL; + } + + env_free_memory(vq); + } +} + +/*! 
+ * virtqueue_free - Frees VirtIO queue resources - static version + * + * @param vq - Pointer to VirtIO queue control block + * + */ +void virtqueue_free_static(struct virtqueue *vq) +{ + if (vq != VQ_NULL) + { + if (vq->vq_ring_mem != VQ_NULL) + { + vq->vq_ring_size = 0; + vq->vq_ring_mem = VQ_NULL; + } + } +} + +/*! + * virtqueue_get_available_buffer - Returns buffer available for use in the + * VirtIO queue + * + * @param vq - Pointer to VirtIO queue control block + * @param avail_idx - Pointer to index used in vring desc table + * @param len - Length of buffer + * + * @return - Pointer to available buffer + */ +void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len) +{ + uint16_t head_idx = 0; + void *buffer; + + if (vq->vq_available_idx == vq->vq_ring.avail->idx) + { + return (VQ_NULL); + } + + VQUEUE_BUSY(vq, avail_read); + + head_idx = (uint16_t)(vq->vq_available_idx++ & ((uint16_t)(vq->vq_nentries - 1U))); + *avail_idx = vq->vq_ring.avail->ring[head_idx]; + + env_rmb(); +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + buffer = env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[*avail_idx].addr))); +#else + buffer = env_map_patova((uint32_t)(vq->vq_ring.desc[*avail_idx].addr)); +#endif + *len = vq->vq_ring.desc[*avail_idx].len; + //printf("%s indx = %d len = %d buffer = 0x%x \r\n",__func__,*avail_idx,*len,buffer); + VQUEUE_IDLE(vq, avail_read); + + return (buffer); +} + +/*! 
+ * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue + * + * @param vq - Pointer to VirtIO queue control block + * @param head_idx - Index of vring desc containing used buffer + * @param len - Length of buffer + * + * @return - Function status + */ +int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len) +{ + if (head_idx > vq->vq_nentries) + { + return (ERROR_VRING_NO_BUFF); + } + + VQUEUE_BUSY(vq, used_write); + vq_ring_update_used(vq, head_idx, len); + VQUEUE_IDLE(vq, used_write); + + return (VQUEUE_SUCCESS); +} + +/*! + * virtqueue_fill_used_buffers - Fill used buffer ring + * + * @param vq - Pointer to VirtIO queue control block + * @param buffer - Buffer to add + * @param len - Length of buffer + * + * @return - Function status + */ +int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len) +{ + uint16_t head_idx; + uint16_t idx; + + VQUEUE_BUSY(vq, used_write); + + head_idx = vq->vq_desc_head_idx; + VQ_RING_ASSERT_VALID_IDX(vq, head_idx); + + /* Enqueue buffer onto the ring. */ + idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer, len); + + vq->vq_desc_head_idx = idx; + + vq_ring_update_used(vq, head_idx, len); + + VQUEUE_IDLE(vq, used_write); + + return (VQUEUE_SUCCESS); +} + +/*! + * virtqueue_enable_cb - Enables callback generation + * + * @param vq - Pointer to VirtIO queue control block + * + * @return - Function status + */ +int32_t virtqueue_enable_cb(struct virtqueue *vq) +{ + return (vq_ring_enable_interrupt(vq, 0)); +} + +/*! 
+ * virtqueue_enable_cb - Disables callback generation + * + * @param vq - Pointer to VirtIO queue control block + * + */ +void virtqueue_disable_cb(struct virtqueue *vq) +{ + VQUEUE_BUSY(vq, avail_write); + + if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL) + { + vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1U; + } + else + { + vq->vq_ring.avail->flags |= (uint16_t)VRING_AVAIL_F_NO_INTERRUPT; + } + + VQUEUE_IDLE(vq, avail_write); +} + +/*! + * virtqueue_kick - Notifies other side that there is buffer available for it. + * + * @param vq - Pointer to VirtIO queue control block + */ +void virtqueue_kick(struct virtqueue *vq) +{ + VQUEUE_BUSY(vq, avail_write); + + /* Ensure updated avail->idx is visible to host. */ + env_mb(); + if (0 != vq_ring_must_notify_host(vq)) + { + vq_ring_notify_host(vq); + } + vq->vq_queued_cnt = 0; + + VQUEUE_IDLE(vq, avail_write); +} + +/*! + * virtqueue_dump Dumps important virtqueue fields , use for debugging purposes + * + * @param vq - Pointer to VirtIO queue control block + */ +void virtqueue_dump(struct virtqueue *vq) +{ + if (vq == VQ_NULL) + { + return; + } + + env_print( + "VQ: %s - size=%d; used=%d; queued=%d; " + "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; " + "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n", + vq->vq_name, vq->vq_nentries, virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx, + vq->vq_ring.avail->idx, vq->vq_used_cons_idx, vq->vq_ring.used->idx, vq->vq_ring.avail->flags, + vq->vq_ring.used->flags); +} + +/*! 
+ * virtqueue_get_desc_size - Returns vring descriptor size + * + * @param vq - Pointer to VirtIO queue control block + * + * @return - Descriptor length + */ +uint32_t virtqueue_get_desc_size(struct virtqueue *vq) +{ + uint16_t head_idx; + uint16_t avail_idx; + uint32_t len; + + if (vq->vq_available_idx == vq->vq_ring.avail->idx) + { + return 0; + } + + head_idx = (uint16_t)(vq->vq_available_idx & ((uint16_t)(vq->vq_nentries - 1U))); + avail_idx = vq->vq_ring.avail->ring[head_idx]; + len = vq->vq_ring.desc[avail_idx].len; + + return (len); +} + +/************************************************************************** + * Helper Functions * + **************************************************************************/ + +/*! + * + * vq_ring_add_buffer + * + */ +static uint16_t vq_ring_add_buffer( + struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length) +{ + struct vring_desc *dp; + + if (buffer == VQ_NULL) + { + return head_idx; + } + + VQASSERT(vq, head_idx != VQ_RING_DESC_CHAIN_END, "premature end of free desc chain"); + + dp = &desc[head_idx]; +#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1) + dp->addr = env_map_vatopa(vq->env, buffer); +#else + dp->addr = env_map_vatopa(buffer); +#endif + dp->len = length; + dp->flags = VRING_DESC_F_WRITE; + + return (head_idx + 1U); +} + +/*! + * + * vq_ring_init + * + */ +void vq_ring_init(struct virtqueue *vq) +{ + struct vring *vr; + uint32_t i, size; + + size = (uint32_t)(vq->vq_nentries); + vr = &vq->vq_ring; + + for (i = 0U; i < size - 1U; i++) + { + vr->desc[i].next = (uint16_t)(i + 1U); + } + vr->desc[i].next = (uint16_t)VQ_RING_DESC_CHAIN_END; +} + +/*! + * + * vq_ring_update_avail + * + */ +static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx) +{ + uint16_t avail_idx; + + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. 
The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + avail_idx = (uint16_t)(vq->vq_ring.avail->idx & ((uint16_t)(vq->vq_nentries - 1U))); + vq->vq_ring.avail->ring[avail_idx] = desc_idx; + + env_wmb(); + + vq->vq_ring.avail->idx++; + + /* Keep pending count until virtqueue_notify(). */ + vq->vq_queued_cnt++; +} + +/*! + * + * vq_ring_update_used + * + */ +static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len) +{ + uint16_t used_idx; + struct vring_used_elem *used_desc = VQ_NULL; + + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1U); + used_desc = &(vq->vq_ring.used->ring[used_idx]); + used_desc->id = head_idx; + used_desc->len = len; + //printf("%s vaname = %s : used_idx = %d head_idx = %d vqringmem = 0x%x &usedidx = 0x%x ####\n",__func__,vq->vq_name,used_idx,head_idx,vq->vq_ring_mem,&vq->vq_ring.used->idx); + env_wmb(); + + vq->vq_ring.used->idx++; +} + +/*! + * + * vq_ring_enable_interrupt + * + */ +static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc) +{ + /* + * Enable interrupts, making sure we get the latest index of + * what's already been consumed. + */ + if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL) + { + vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc; + } + else + { + vq->vq_ring.avail->flags &= ~(uint16_t)VRING_AVAIL_F_NO_INTERRUPT; + } + + env_mb(); + + /* + * Enough items may have already been consumed to meet our threshold + * since we last checked. Let our caller know so it processes the new + * entries. 
+ */ + if (virtqueue_nused(vq) > ndesc) + { + return (1); + } + + return (0); +} + +/*! + * + * virtqueue_interrupt + * + */ +void virtqueue_notification(struct virtqueue *vq) +{ + if (vq != VQ_NULL) + { + if (vq->callback_fc != VQ_NULL) + { + vq->callback_fc(vq); + } + } +} + +/*! + * + * vq_ring_must_notify_host + * + */ +static int32_t vq_ring_must_notify_host(struct virtqueue *vq) +{ + uint16_t new_idx, prev_idx; + uint16_t event_idx; + + if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL) + { + new_idx = vq->vq_ring.avail->idx; + prev_idx = new_idx - vq->vq_queued_cnt; + event_idx = (uint16_t)vring_avail_event(&vq->vq_ring); + + return ((vring_need_event(event_idx, new_idx, prev_idx) != 0) ? 1 : 0); + } + + return (((vq->vq_ring.used->flags & ((uint16_t)VRING_USED_F_NO_NOTIFY)) == 0U) ? 1 : 0); +} + +/*! + * + * vq_ring_notify_host + * + */ +static void vq_ring_notify_host(struct virtqueue *vq) +{ + if (vq->notify_fc != VQ_NULL) + { + vq->notify_fc(vq); + } +} + +/*! + * + * virtqueue_nused + * + */ +static uint16_t virtqueue_nused(struct virtqueue *vq) +{ + uint16_t used_idx, nused; + + used_idx = vq->vq_ring.used->idx; + + nused = (uint16_t)(used_idx - vq->vq_used_cons_idx); + VQASSERT(vq, nused <= vq->vq_nentries, "used more than available"); + + return (nused); +} diff --git a/cpu/core-riscv/ld.lds b/cpu/core-riscv/ld.lds index 7e02ef17..639874d8 100644 --- a/cpu/core-riscv/ld.lds +++ b/cpu/core-riscv/ld.lds @@ -113,7 +113,12 @@ SECTIONS _edata = .; } > dataram - + .resource_table : { + . = ALIGN(4); + KEEP (*(.resource_table*)) + . = ALIGN(4); + } > dataram + .bss : { . 
= ALIGN(4); diff --git a/drivers/drivers-x2600/include/x2600_ll_risc_ccu.h b/drivers/drivers-x2600/include/x2600_ll_risc_ccu.h index aaa5b21e..f27503a0 100755 --- a/drivers/drivers-x2600/include/x2600_ll_risc_ccu.h +++ b/drivers/drivers-x2600/include/x2600_ll_risc_ccu.h @@ -66,6 +66,9 @@ void LL_RISC_CCU_StructInit(LL_RISC_CCU_InitTypeDef *RISC_CCU_InitStruct); ErrorStatus LL_RISC_CCU_Init(RISCV_CCU_TypeDef *RISC_CCU, LL_RISC_CCU_InitTypeDef *RISC_CCU_InitStruct); +void LL_RISC_CCU_Mbox_Sendmsg(RISCV_CCU_TypeDef *RISC_CCU, unsigned int msg); +unsigned int LL_RISC_CCU_Mbox_Recvmsg(RISCV_CCU_TypeDef *RISC_CCU); + /* 6. Exported Variables ------------------------------------------ */ /* 7. Private Types ----------------------------------------------- */ diff --git a/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c b/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c index 42139dde..a00b60e6 100644 --- a/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c +++ b/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c @@ -1,11 +1,11 @@ #include "x2600_ll_risc_ccu.h" +/*systick*/ void LL_RISC_CCU_StructInit(LL_RISC_CCU_InitTypeDef *RISC_CCU_InitStruct) { RISC_CCU_InitStruct->ticks_per_second = 100; //默认100Hz } - static void LL_RISC_CCU_EventInit(RISCV_CCU_TypeDef *RISC_CCU, int ticks_per_second) { unsigned int ticks_us = 1000000 / ticks_per_second; @@ -29,3 +29,17 @@ ErrorStatus LL_RISC_CCU_Init(RISCV_CCU_TypeDef *RISC_CCU, LL_RISC_CCU_InitTypeDe return SUCCESS; } +/*mailbox*/ +void LL_RISC_CCU_Mbox_Sendmsg(RISCV_CCU_TypeDef *RISC_CCU, unsigned int msg) +{ + WRITE_REG(RISC_CCU->MBOX_TO_HOST, msg); +} + +unsigned int LL_RISC_CCU_Mbox_Recvmsg(RISCV_CCU_TypeDef *RISC_CCU) +{ + unsigned int msg = 0; + msg = READ_REG(RISC_CCU->MBOX_FROM_HOST); + WRITE_REG(RISC_CCU->MBOX_FROM_HOST, 0); + return msg; +} + diff --git a/projects/x2660-halley/Templates/template-riscv-freertos/README.md b/projects/x2660-halley/Templates/template-riscv-freertos/README.md index 220afc6b..330db14a 100644 --- 
a/projects/x2660-halley/Templates/template-riscv-freertos/README.md +++ b/projects/x2660-halley/Templates/template-riscv-freertos/README.md @@ -14,7 +14,7 @@ b. 基于cmake ``` $ mkdir build $ cd build -$ cmake -DCMAKE_TOOLCHAIN_FILE=../mips-gcc-sde-elf.cmake .. +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake .. $ make ``` @@ -39,7 +39,7 @@ d. 或者选择状态栏,build,仅编译. ``` $ mkdir build $ cd build -$ cmake -DCMAKE_TOOLCHAIN_FILE=../mips-gcc-sde-elf.cmake -G "MinGW Makefiles" ../ +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake -G "MinGW Makefiles" ../ $ mingw32-make ``` diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/cmake-kits.json b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/cmake-kits.json new file mode 100644 index 00000000..e9582fe3 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/cmake-kits.json @@ -0,0 +1,18 @@ +[ + { + "name": "RISCV GCC for ingenic cross compile on Windows", + + "toolchainFile": "riscv32-gcc.cmake", + "preferredGenerator": { + "name":"MinGW Makefiles" + } + }, + { + "name": "RISCV GCC for ingenic cross compile on Linux", + "toolchainFile": "riscv32-gcc.cmake", + "preferredGenerator": { + "name":"Unix Makefiles" + } + } +] + \ No newline at end of file diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/launch.json b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/launch.json new file mode 100644 index 00000000..617a05ab --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/launch.json @@ -0,0 +1,55 @@ +{ + "version": "0.2.0", + "configurations": [ + // GDB Debugging: + { + "program": "${command:cmake.launchTargetPath}", + "name": "Launch (gdb)", + "request": "launch", + "args": [], + "stopAtEntry": false, + "cwd": "${workspaceFolder}", + "console": "integratedTerminal", + "internalConsoleOptions": "openOnSessionStart", + "type": "cppdbg", + "MIMode": "gdb", + "miDebuggerPath": "riscv32-ingenicv0-elf-gdb", 
+ "miDebuggerArgs": "", + "miDebuggerServerAddress": "localhost:3333", + "targetArchitecture": "mips", + "preLaunchTask": "adb forward", + "customLaunchSetupCommands": [ + { + "description": "gdb 启用整齐打印", + "text": "-enable-pretty-printing", + "ignoreFailures": true + }, + { + "text":"cd ${workspaceFolder}", + "ignoreFailures": false + }, + { + "text":"file build/${command:cmake.buildType}/${command:cmake.launchTargetFilename}", + "ignoreFailures": false + }, + { + "text": "target remote localhost:3333", + "ignoreFailures": false + }, + { + "text": "monitor reset halt", + "ignoreFailures": false + }, + { + "text": "load", + "ignoreFailures": false + } + ], + "logging": { + "engineLogging": false, + "programOutput": true + } + } + ] + } + \ No newline at end of file diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/settings.json b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/settings.json new file mode 100644 index 00000000..872b961e --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "cmake.buildDirectory": "${workspaceFolder}/build/${buildType}", + "files.associations": { + "*.build": "makefile", + "*.mk": "makefile", + "Makefile*": "makefile" + } +} diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/tasks.json b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/tasks.json new file mode 100644 index 00000000..53b4731e --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/.vscode/tasks.json @@ -0,0 +1,12 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=733558 + // for the documentation about the tasks.json format + "version": "2.0.0", + "tasks": [ + { + "label": "adb forward", + "type": "shell", + "command": "adb forward tcp:3333 tcp:3333", + }, + ] +} \ No newline at end of file diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/CMakeLists.txt 
b/projects/x2660-halley/Templates/template-riscv-rpmsg/CMakeLists.txt new file mode 100755 index 00000000..7665376d --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/CMakeLists.txt @@ -0,0 +1,119 @@ +cmake_minimum_required(VERSION 3.8) +# +# Core project settings +# +Project(template) # Modified +enable_language(C CXX ASM) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +# Setup compiler settings +set(CMAKE_C_STANDARD 11) +set(CMAKE_C_STANDARD_REQUIRED ON) +set(CMAKE_C_EXTENSIONS ON) +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS ON) +set(PROJ_PATH ${CMAKE_CURRENT_SOURCE_DIR}) +set(SDK_PATH ${PROJ_PATH}/../../../../) +message("Build type: " ${CMAKE_BUILD_TYPE}) + +# Set linker script +set(linker_script_SRC ${SDK_PATH}/cpu/core-riscv/ld.lds) # Modified +set(EXECUTABLE ${CMAKE_PROJECT_NAME}) +set(CPU_PARAMETERS "-march=rv32imc -mabi=ilp32 -Wno-abi") + +set(CMAKE_ASM_FLAGS "${CPU_PARAMETERS} -D_ASSEMBLER_ -D__ASSEMBLY__") + +set(CMAKE_C_FLAGS "${CPU_PARAMETERS} -fno-pic -fno-builtin -fomit-frame-pointer -Wall -nostdlib -Wall -fdata-sections -ffunction-sections") + +# Compiler options + +if(CMAKE_BUILD_TYPE STREQUAL Debug) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -ggdb -DDEBUG") +else() + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2") +endif() + +set(CMAKE_CXX_FLAGS ${CMAKE_C_FLAGS}) + +set(CMAKE_LD_FLAGS "${CPU_PARAMETERS}") + + +set(sources_SRCS # Modified + ${SDK_PATH}/cpu/core-riscv/spinlock.c + ${SDK_PATH}/cpu/core-riscv/start.S + ${SDK_PATH}/cpu/core-riscv/genex.S + ${SDK_PATH}/cpu/core-riscv/traps.c + ${SDK_PATH}/cpu/soc-x2600/src/interrupt.c + ${SDK_PATH}/cpu/soc-x2600/src/serial.c + ${SDK_PATH}/cpu/soc-x2600/src/startup.c + ${SDK_PATH}/drivers/drivers-x2600/src/x2600_hal_def.c + ${SDK_PATH}/drivers/drivers-x2600/src/x2600_hal_tick_risc_ccu.c + ${SDK_PATH}/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c + ${SDK_PATH}/lib/libc/minimal/ctype.c + ${SDK_PATH}/lib/libc/minimal/div64.c + 
${SDK_PATH}/lib/libc/minimal/string.c + ${SDK_PATH}/lib/libc/minimal/vsprintf.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/common/llist.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/virtio/virtqueue.c + ${SDK_PATH}/Middlewares/Rpmsg_Library/Src/rpmsg_api.c + main.c + +) + +if(CMAKE_EXPORT_COMPILE_COMMANDS) + set(CMAKE_C_STANDARD_INCLUDE_DIRECTORIES ${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}) + set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES}) +endif() + +# +# Include directories +# +#set(include_path_DIRS +# Modified + +include_directories( + ${PROJ_PATH}/include + ${SDK_PATH}/lib/libc/minimal/include + ${SDK_PATH}/drivers/drivers-x2600/include + ${SDK_PATH}/cpu/core-riscv/include + ${SDK_PATH}/cpu/soc-x2600/include + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/include + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/include/environment/bm + ${SDK_PATH}/Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv + ${SDK_PATH}/Middlewares/Rpmsg_Library/Inc +) + +# +# -L libdirs. 
+# +link_directories( +#path/to/lib +) + +# Executable files +add_executable(${EXECUTABLE} ${sources_SRCS}) + +# Linker options +target_link_libraries(${EXECUTABLE} PRIVATE + -T${linker_script_SRC} + ${CMAKE_LD_FLAGS} + -Wl,-Map=${CMAKE_PROJECT_NAME}.map,--cref + -Wl,--gc-sections + -Wl,--start-group + -Wl,--end-group + -Wl,--print-memory-usage +) + +# Execute post-build to print size +add_custom_command(TARGET ${EXECUTABLE} POST_BUILD + COMMAND ${CMAKE_SIZE} $ +) + +# Convert output to hex and binary +add_custom_command(TARGET ${EXECUTABLE} POST_BUILD + COMMAND ${CMAKE_OBJCOPY} -O binary $ ${EXECUTABLE}.bin + ) diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/Makefile b/projects/x2660-halley/Templates/template-riscv-rpmsg/Makefile new file mode 100644 index 00000000..cd3b0df3 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/Makefile @@ -0,0 +1,185 @@ +###################################### +# target +###################################### +TARGET = template + +SDK_PATH = ../../../../ + + +###################################### +# building variables +###################################### +# debug build? 
+DEBUG = 1 +# optimization +OPT = -Og -fno-pic -fno-builtin -fomit-frame-pointer -Wall -nostdlib -Wall -fdata-sections -ffunction-sections + +####################################### +# paths +####################################### +# Build path +BUILD_DIR = build + +###################################### +# source +###################################### +# C sources +C_SOURCES = \ +$(SDK_PATH)/cpu/core-riscv/traps.c \ +$(SDK_PATH)/cpu/core-riscv/spinlock.c \ +$(SDK_PATH)/cpu/soc-x2600/src/startup.c \ +$(SDK_PATH)/cpu/soc-x2600/src/serial.c \ +$(SDK_PATH)/cpu/soc-x2600/src/interrupt.c \ +$(SDK_PATH)/drivers/drivers-x2600/src/x2600_hal_def.c \ +${SDK_PATH}/drivers/drivers-x2600/src/x2600_hal_tick_risc_ccu.c \ +${SDK_PATH}/drivers/drivers-x2600/src/x2600_ll_risc_ccu.c \ +$(SDK_PATH)/lib/libc/minimal/vsprintf.c \ +$(SDK_PATH)/lib/libc/minimal/string.c \ +$(SDK_PATH)/lib/libc/minimal/ctype.c \ +$(SDK_PATH)/lib/libc/minimal/div64.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/common/llist.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/environment/rpmsg_env_bm.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/porting/platform/ingenic_riscv/rpmsg_platform.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_lite.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/rpmsg_lite/rpmsg_ns.c \ +${SDK_PATH}/Middlewares/Third_Party/rpmsg/virtio/virtqueue.c \ +${SDK_PATH}/Middlewares/Rpmsg_Library/Src/rpmsg_api.c \ +main.c + +# ASM sources +ASM_SOURCES = \ +$(SDK_PATH)/cpu/core-riscv/start.S \ +$(SDK_PATH)/cpu/core-riscv/genex.S + + +####################################### +# binaries +####################################### +PREFIX = riscv32-ingenicv0-elf- +# The gcc compiler bin path can be either defined in make command via GCC_PATH variable (> make GCC_PATH=xxx) +# either it can be added to the PATH environment variable. 
+ifdef GCC_PATH +CC = $(GCC_PATH)/$(PREFIX)gcc +AS = $(GCC_PATH)/$(PREFIX)as +LD = $(GCC_PATH)/$(PREFIX)ld +CP = $(GCC_PATH)/$(PREFIX)objcopy +SZ = $(GCC_PATH)/$(PREFIX)size +else +CC = $(PREFIX)gcc +AS = $(PREFIX)as +LD = $(PREFIX)ld +CP = $(PREFIX)objcopy +SZ = $(PREFIX)size +endif +BIN = $(CP) -O binary -S + +####################################### +# CFLAGS +####################################### +# cpu +CPU = -march=rv32imc -mabi=ilp32 -Wno-abi + +# fpu +FPU = + +# float-abi +FLOAT-ABI = + +# mcu +#MCU = $(CPU) -mthumb $(FPU) $(FLOAT-ABI) +MCU = $(CPU) $(FPU) $(FLOAT-ABI) + +# macros for gcc +# AS defines +AS_DEFS = -D_ASSEMBLER_ -D__ASSEMBLY__ + +# C defines +C_DEFS = + + +# AS includes +AS_INCLUDES = \ +-I$(SDK_PATH)/cpu/core-riscv/include \ +-I$(SDK_PATH)/lib/libc/minimal/include \ + +# C includes +C_INCLUDES = \ +-Iinclude \ +-I$(SDK_PATH)/lib/libc/minimal/include \ +-I${SDK_PATH}/drivers/drivers-x2600/include \ +-I$(SDK_PATH)/cpu/core-riscv/include \ +-I$(SDK_PATH)/cpu/soc-x2600/include \ +-I$(SDK_PATH)/drivers/drivers-x2600/include \ +-I${SDK_PATH}/Middlewares/Third_Party/rpmsg/include \ +-I${SDK_PATH}/Middlewares/Third_Party/rpmsg/include/environment/bm \ +-I${SDK_PATH}/Middlewares/Third_Party/rpmsg/include/platform/ingenic_riscv \ +-I${SDK_PATH}/Middlewares/Rpmsg_Library/Inc \ + + +# compile gcc flags +ASFLAGS = $(MCU) $(AS_DEFS) $(AS_INCLUDES) $(OPT) -Wall -fdata-sections -ffunction-sections + +CFLAGS = $(MCU) $(C_DEFS) $(C_INCLUDES) $(OPT) -Wall -fdata-sections -ffunction-sections + +ifeq ($(DEBUG), 1) +CFLAGS += -g -gdwarf-2 -O0 +endif + + +# Generate dependency information +CFLAGS += -MMD -MP -MF"$(@:%.o=%.d)" + + +####################################### +# LDFLAGS +####################################### +# link script +LDSCRIPT = $(SDK_PATH)/cpu/core-riscv/ld.lds + +# libraries +#LIBS = -lc -lm -lnosys +LIBDIR = +LDFLAGS = $(MCU) -T$(LDSCRIPT) $(LIBDIR) $(LIBS) -Wl,-Map=$(BUILD_DIR)/$(TARGET).map,--cref -Wl,--gc-sections -nostdlib + +# default 
action: build all +all: $(BUILD_DIR)/$(TARGET).elf $(BUILD_DIR)/$(TARGET).bin + + +####################################### +# build the application +####################################### +# list of objects +OBJECTS = $(addprefix $(BUILD_DIR)/,$(notdir $(C_SOURCES:.c=.o))) +vpath %.c $(sort $(dir $(C_SOURCES))) +# list of ASM program objects +OBJECTS += $(addprefix $(BUILD_DIR)/,$(notdir $(ASM_SOURCES:.S=.o))) +vpath %.S $(sort $(dir $(ASM_SOURCES))) + +$(BUILD_DIR)/%.o: %.c Makefile | $(BUILD_DIR) + $(CC) -c $(CFLAGS) $< -o $@ + +$(BUILD_DIR)/%.o: %.S Makefile | $(BUILD_DIR) + $(CC) -c $(ASFLAGS) -o $@ $< + +$(BUILD_DIR)/$(TARGET).elf: $(OBJECTS) Makefile + $(CC) $(OBJECTS) $(LDFLAGS) -o $@ + $(SZ) $@ + +$(BUILD_DIR)/%.bin: $(BUILD_DIR)/%.elf | $(BUILD_DIR) + $(BIN) $< $@ + +$(BUILD_DIR): + mkdir $@ + +####################################### +# clean up +####################################### +clean: + -rm -fR $(BUILD_DIR) + +####################################### +# dependencies +####################################### +-include $(wildcard $(BUILD_DIR)/*.d) + +# *** EOF *** diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/README.md b/projects/x2660-halley/Templates/template-riscv-rpmsg/README.md new file mode 100644 index 00000000..330db14a --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/README.md @@ -0,0 +1,46 @@ + +1. Linux 命令行编译 + +a. 基于Makefile + +``` +$ make +``` +会在build目录生成template.elf, template.bin文件. + + +b. 基于cmake + +``` +$ mkdir build +$ cd build +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake .. +$ make + +``` +会在build目录下生成template.elf,template.bin文件. + + +2. windows 编译. + +2.1 基于vscode + +a. vscode 打开projects/template 文件夹 +b. 选择cmake-kits "GCC for ingenic cross compile on Windows" +c. launch(F5) 运行编译、调试 +d. 或者选择状态栏,build,仅编译. + + + +2.2 基于命令行 + +前提: 系统必须安装msys - mingw64-make 工具. 
+ +``` +$ mkdir build +$ cd build +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake -G "MinGW Makefiles" ../ +$ mingw32-make +``` + + diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/include/board_eth_phy_conf.h b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/board_eth_phy_conf.h new file mode 100644 index 00000000..c907ead4 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/board_eth_phy_conf.h @@ -0,0 +1,75 @@ +#ifndef __ETH_PHY_CONF_H +#define __ETH_PHY_CONF_H + +/* ################## Ethernet peripheral configuration ##################### */ + +/* Section 1 : Ethernet peripheral configuration */ + +/* MAC ADDRESS: MAC_ADDR0:MAC_ADDR1:MAC_ADDR2:MAC_ADDR3:MAC_ADDR4:MAC_ADDR5 */ +#define MAC_ADDR0 0x00U +#define MAC_ADDR1 0x11U +#define MAC_ADDR2 0x22U +#define MAC_ADDR3 0x33U +#define MAC_ADDR4 0x44U +#define MAC_ADDR5 0x55U + +/* Definition of the Ethernet driver buffers size and count */ +#define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for receive */ +#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for transmit */ +#define ETH_RXBUFNB ((uint32_t)4U) /* 4 Rx buffers of size ETH_RX_BUF_SIZE */ +#define ETH_TXBUFNB ((uint32_t)4U) /* 4 Tx buffers of size ETH_TX_BUF_SIZE */ + +/* Section 2: PHY configuration section */ + +/* DP83848 PHY Address*/ +#define DP83848_PHY_ADDRESS 0x01U +/* PHY Reset delay these values are based on a 1 ms Systick interrupt*/ +#define PHY_RESET_DELAY ((uint32_t)0x000000FFU) +/* PHY Configuration delay */ +#define PHY_CONFIG_DELAY ((uint32_t)0x00000FFFU) + +#define PHY_READ_TO ((uint32_t)0x0000FFFFU) +#define PHY_WRITE_TO ((uint32_t)0x0000FFFFU) + +/* Section 3: Common PHY Registers */ + +#define PHY_BCR ((uint16_t)0x00U) /*!< Transceiver Basic Control Register */ +#define PHY_BSR ((uint16_t)0x01U) /*!< Transceiver Basic Status Register */ + +#define PHY_RESET ((uint16_t)0x8000U) /*!< PHY Reset */ +#define PHY_LOOPBACK ((uint16_t)0x4000U) /*!< Select 
loop-back mode */ +#define PHY_FULLDUPLEX_100M ((uint16_t)0x2100U) /*!< Set the full-duplex mode at 100 Mb/s */ +#define PHY_HALFDUPLEX_100M ((uint16_t)0x2600U) /*!< Set the half-duplex mode at 100 Mb/s */ +#define PHY_FULLDUPLEX_10M ((uint16_t)0x0100U) /*!< Set the full-duplex mode at 10 Mb/s */ +#define PHY_HALFDUPLEX_10M ((uint16_t)0x0000U) /*!< Set the half-duplex mode at 10 Mb/s */ +#define PHY_AUTONEGOTIATION ((uint16_t)0x1000U) /*!< Enable auto-negotiation function */ +#define PHY_RESTART_AUTONEGOTIATION ((uint16_t)0x0200U) /*!< Restart auto-negotiation function */ +#define PHY_POWERDOWN ((uint16_t)0x0800U) /*!< Select the power down mode */ +#define PHY_ISOLATE ((uint16_t)0x0400U) /*!< Isolate PHY from MII */ + +#define PHY_AUTONEGO_COMPLETE ((uint16_t)0x0020U) /*!< Auto-Negotiation process completed */ +#define PHY_LINKED_STATUS ((uint16_t)0x0004U) /*!< Valid link established */ +#define PHY_JABBER_DETECTION ((uint16_t)0x0002U) /*!< Jabber condition detected */ + +/* Section 4: Extended PHY Registers */ + +#define PHY_SR ((uint16_t)0x10U) /*!< PHY status register Offset */ +#define PHY_MICR ((uint16_t)0x11U) /*!< MII Interrupt Control Register */ +#define PHY_MISR ((uint16_t)0x12U) /*!< MII Interrupt Status and Misc. 
Control Register */ + +#define PHY_LINK_STATUS ((uint16_t)0x0001U) /*!< PHY Link mask */ +#define PHY_SPEED_STATUS ((uint16_t)0x0002U) /*!< PHY Speed mask */ +#define PHY_DUPLEX_STATUS ((uint16_t)0x0004U) /*!< PHY Duplex mask */ + +#define PHY_MICR_INT_EN ((uint16_t)0x0002U) /*!< PHY Enable interrupts */ +#define PHY_MICR_INT_OE ((uint16_t)0x0001U) /*!< PHY Enable output interrupt events */ + +#define PHY_MISR_LINK_INT_EN ((uint16_t)0x0020U) /*!< Enable Interrupt on change of link status */ +#define PHY_LINK_INTERRUPT ((uint16_t)0x2600U) /*!< PHY link status interrupt mask */ + +/* ################## Ethernet peripheral configuration ##################### */ + + + +#endif // __ETH_PHY_CONF_H + diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_hal_conf.h b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_hal_conf.h new file mode 100644 index 00000000..f302f82a --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_hal_conf.h @@ -0,0 +1,119 @@ +#ifndef __X2600_HAL_CONF_H__ +#define __X2600_HAL_CONF_H__ +/* TODO: 本文件应该通过工具生成,在配置工程中选择不同的组件时,在此处包含不同的头文件 + 暂时包含全部头文件. +*/ + + +/* 1. Includes ---------------------------------------------------- */ + + +/* Hal Module selections. 
*/ +#if 0 +#define HAL_MSC_ENABLED +#define HAL_I2C_ENABLED +#define HAL_UART_ENABLED +#define HAL_ADC_ENABLED +#define HAL_SPI_ENABLED +#define HAL_WDT_ENABLED +#define HAL_TCU_ENABLED +#define HAL_RTC_ENABLED +#define HAL_EFUSE_ENABLED +#define HAL_PWM_ENABLED +#define HAL_GMAC_ENABLED +#define HAL_USB_ENABLED +#endif + +/* 系统时钟配配置,通过工具生成,随开发板或者平台变化.*/ +#include +#include + +#include "x2600_hal_tick.h" +#include "x2600_ll_ost_core.h" +#include "x2600_ll_ost_global.h" +#include "x2600_ll_cpm.h" +#include "x2600_ll_gpio.h" +#include "x2600_ll_risc_ccu.h" + +#include "x2600_hal_pdma.h" + +//#include "x2600_hal_sfcnor.h" + +#ifdef HAL_MSC_ENABLED + +#endif + +#ifdef HAL_I2C_ENABLED +#include "x2600_hal_i2c.h" +#endif + +#ifdef HAL_UART_ENABLED +#include "x2600_hal_uart.h" +#endif + +#ifdef HAL_ADC_ENABLED +#include "x2600_hal_adc.h" +#endif + +#ifdef HAL_SPI_ENABLED +#include "x2600_hal_spi.h" +#endif + +#ifdef HAL_WDT_ENABLED +#include "x2600_hal_wdt.h" +#endif + +#ifdef HAL_TCU_ENABLED +#include "x2600_hal_tcu.h" +#endif + +#ifdef HAL_RTC_ENABLED +#include "x2600_hal_rtc.h" +#endif + +#ifdef HAL_EFUSE_ENABLED +#include "x2600_ll_efuse.h" +#include "x2600_hal_efuse.h" +#endif + +#ifdef HAL_PWM_ENABLED +#include "x2600_hal_pwm.h" +#endif + +#ifdef HAL_GMAC_ENABLED +#include "x2600_hal_gmac.h" +#endif + +#ifdef HAL_USB_ENABLED +#include "x2600_hal_pcd.h" +#include "x2600_hal_pcd_ex.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* 2. Exported Types ---------------------------------------------- */ + +/* 3. Exported Constants ------------------------------------------ */ + +/* 4. Exported Macros --------------------------------------------- */ + +/* 5. Exported Funcs ---------------------------------------------- */ + +/* 6. Exported Variables ------------------------------------------ */ + +/* 7. Private Types ----------------------------------------------- */ + +/* 8. Private Constants ------------------------------------------- */ + +/* 9. 
Private Macros ---------------------------------------------- */ + +/* 10. Private Funcs ---------------------------------------------- */ + +/* 11. Private Variables ------------------------------------------ */ + +#ifdef __cplusplus +} +#endif +#endif /* __X2600_HAL_CONF_H__ */ diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_sysclk_conf.h b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_sysclk_conf.h new file mode 100644 index 00000000..3c4888de --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/include/x2600_sysclk_conf.h @@ -0,0 +1,68 @@ +/** + * @file x2600_sysclk_conf.h + * @author MPU系统软件部团队 + * @brief + * + * @copyright 版权所有 (北京君正集成电路股份有限公司) {2022} + * @copyright Copyright© 2022 Ingenic Semiconductor Co.,Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __X2600_SYSCLK_CONF_H__ +#define __X2600_SYSCLK_CONF_H__ + +#ifdef __cplusplus +extern "C" { +#endif +/* 1. Includes ---------------------------------------------------- */ + +/* 2. Exported Types ---------------------------------------------- */ + + +/* 3. Exported Constants ------------------------------------------ */ + +/* 4. Exported Macros --------------------------------------------- */ +#define SYSCLK_EXTAL (24000000) +#define SYSCLK_APLL (1200000000) +#define SYSCLK_MPLL (1200000000) + +#define SystemCoreClock SYSCLK_APLL + +#define CGU_CONFIG_MSC_APLL_24M { \ + .PLLMux = MSC1CDR_SCLK_A, \ + .Div = 24, \ + .Config = 0 } + +#define CGU_CONFIG_MSC_APLL_48M { \ + .PLLMux = MSC1CDR_SCLK_A, \ + .Div = 11, \ + .Config = 0 } + + +#define CGU_CONFIG_SSI_MPLL_500K { \ + .PLLMux = SSICDR_MPLL, \ + .Div = 15, \ + .Config = 0 } + +/* 5. Exported Funcs ---------------------------------------------- */ + +/* 6. 
Exported Variables ------------------------------------------ */ + +/* 7. Private Types ----------------------------------------------- */ + +/* 8. Private Constants ------------------------------------------- */ + +/* 9. Private Macros ---------------------------------------------- */ + +/* 10. Private Funcs ---------------------------------------------- */ + +/* 11. Private Variables ------------------------------------------ */ + +#ifdef __cplusplus +} +#endif +#endif /* __X2600_SYSCLK_CONF_H__ */ diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/main.c b/projects/x2660-halley/Templates/template-riscv-rpmsg/main.c new file mode 100644 index 00000000..f7219a63 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/main.c @@ -0,0 +1,28 @@ +#include +#include + +static ingenic_rpmsg_t ingenic_rpmsg_ctx; + +int main() +{ + unsigned int i,j; + void *buffer; + uint32_t len, src; + char msg[16]; + + printf("\nriscv start\n"); + + create_channel(&ingenic_rpmsg_ctx); + + while(1) { + msg_recv(&buffer, &len, &src); + printf("====%s\n", buffer); + sprintf(msg, "hello host!"); + msg_send(&ingenic_rpmsg_ctx, src, msg, sizeof(msg), 100); + } + + printf("\nriscv end\n"); + + return 0; +} + diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/riscv32-gcc.cmake b/projects/x2660-halley/Templates/template-riscv-rpmsg/riscv32-gcc.cmake new file mode 100644 index 00000000..d09813df --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/riscv32-gcc.cmake @@ -0,0 +1,18 @@ +set(CMAKE_SYSTEM_NAME Generic) +set(CMAKE_SYSTEM_PROCESSOR riscv32) + +# Some default GCC settings +set(TOOLCHAIN_PREFIX "riscv32-ingenicv0-elf-") + +set(CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}gcc) +set(CMAKE_ASM_COMPILER ${CMAKE_C_COMPILER}) +set(CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}g++) + +set(CMAKE_OBJCOPY ${TOOLCHAIN_PREFIX}objcopy) +set(CMAKE_SIZE ${TOOLCHAIN_PREFIX}size) + +set(CMAKE_EXECUTABLE_SUFFIX_ASM ".elf") +set(CMAKE_EXECUTABLE_SUFFIX_C ".elf") 
+set(CMAKE_EXECUTABLE_SUFFIX_CXX ".elf") + +set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/Makefile b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/Makefile new file mode 100644 index 00000000..70798c60 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/Makefile @@ -0,0 +1,5 @@ +all: + mips-linux-gnu-gcc main.c rpmsg_api.c -o rpmsg_test +clean: + rm rpmsg_test + diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/main.c b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/main.c new file mode 100644 index 00000000..6c3db880 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/main.c @@ -0,0 +1,40 @@ +#include +#include +#include +#include + + +#include "rpmsg_api.h" + +int main(int argc, char* argv[]) +{ + int fd = 0; + char buf[16]; + int ret = 0; + int index = 0; + + while(!enum_channel(index)){ + printf("channel %d is avaliable\n", index); + index++; + } + + fd = creat_ept(0); + if(fd < 0) + return -EINVAL; + + ret = send_msg(fd,"hello remote\n",16); + if(ret < 0) + goto end; + + ret = recv_msg(fd, buf, sizeof(buf), 0); + if(ret < 0) + goto end; + + /*deal with recv msg*/ + printf("====%s\n", buf); + +end: + ret = destory_ept(fd); + + return ret; +} diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.c b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.c new file mode 100644 index 00000000..26ca3bfa --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.c @@ -0,0 +1,113 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rpmsg_api.h" + +int enum_channel(int index) +{ + int ret = 0; + char name[32]; + + sprintf(name, "/dev/rpmsg_ctrl%d", index); + ret = access(name, F_OK); + return ret; +} + +int 
creat_ept(int channel_nr) +{ + int fd = 0; + int fd_rpmsg = 0; + int ret = 0; + struct rpmsg_endpoint_info ept_info = {0}; + char channel_name[32] = {0}; + char ept_name[32] = {0}; + static int cnt = 0; + + /*creat rpmsg0*/ + sprintf(channel_name, "/dev/rpmsg_ctrl%d", channel_nr); + fd = open(channel_name, O_RDWR); + if(fd < 0) { + printf("channel dosen't exist!\n"); + return 0; + } + + sprintf(ept_info.name, "rpmsg_chrdev_ept%d",cnt); + ept_info.src = RPMSG_ADDR_ANY; + ept_info.dst = 0x03; + + ret = ioctl(fd, RPMSG_CREATE_EPT_IOCTL, &ept_info); + close(fd); + + + /*acctually creat ept*/ + sprintf(ept_name, "/dev/rpmsg%d", cnt); + fd_rpmsg = open(ept_name, O_RDWR); + if(fd_rpmsg < 0) + perror("open"); + + cnt++; + + return fd_rpmsg; +} + +int destory_ept(int fd) +{ + unsigned int ret = 0; + ret = ioctl(fd, RPMSG_DESTROY_EPT_IOCTL, NULL); + close(fd); + + return ret; +} + +int send_msg(int fd, void *msg, int len) +{ + unsigned int ret = 0; + + /*send msg*/ + ret = write(fd, msg, len); + + return ret; +} + +int recv_msg(int fd, void *buf, int len, bool block) +{ + fd_set fds; + struct timeval tv; + int ret = 0; + + FD_ZERO(&fds); + FD_SET(fd,&fds); + + tv.tv_sec = 3; + tv.tv_usec = 0; + + if(block) + ret = select(fd + 1,&fds,NULL,NULL,NULL); + else + ret = select(fd + 1,&fds,NULL,NULL,&tv); + + if(-1 == ret) + { + if(EINTR == errno) { + perror("Fail to select"); + return -errno; + } + } + + if(0 == ret) + { + fprintf(stderr,"select Timeout\n"); + return -errno; + } + + ret = read(fd, buf, len); + return ret; +} + diff --git a/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.h b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.h new file mode 100644 index 00000000..683c5716 --- /dev/null +++ b/projects/x2660-halley/Templates/template-riscv-rpmsg/xburst2_app/rpmsg_api.h @@ -0,0 +1,24 @@ +#ifndef __RPMSG_API_H__ +#define __RPMSG_API_H__ + +#include + +struct rpmsg_endpoint_info { + char name[32]; + unsigned int src; + 
unsigned int dst; +}; + +#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) +#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) + +#define RPMSG_ADDR_ANY 0xFFFFFFFF + + +int enum_channel(int index); +int creat_ept(int channel_nr); +int destory_ept(int fd); +int send_msg(int fd, void *msg, int len); +int recv_msg(int fd, void *buf, int len, bool block); + +#endif /* __RPMSG_API_H__ */ diff --git a/projects/x2660-halley/Templates/template-riscv/README.md b/projects/x2660-halley/Templates/template-riscv/README.md index 220afc6b..330db14a 100644 --- a/projects/x2660-halley/Templates/template-riscv/README.md +++ b/projects/x2660-halley/Templates/template-riscv/README.md @@ -14,7 +14,7 @@ b. 基于cmake ``` $ mkdir build $ cd build -$ cmake -DCMAKE_TOOLCHAIN_FILE=../mips-gcc-sde-elf.cmake .. +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake .. $ make ``` @@ -39,7 +39,7 @@ d. 或者选择状态栏,build,仅编译. ``` $ mkdir build $ cd build -$ cmake -DCMAKE_TOOLCHAIN_FILE=../mips-gcc-sde-elf.cmake -G "MinGW Makefiles" ../ +$ cmake -DCMAKE_TOOLCHAIN_FILE=../riscv32-gcc.cmake -G "MinGW Makefiles" ../ $ mingw32-make ``` -- Gitee