From 8da29cffa4d45766479d780f87ff6466a0397982 Mon Sep 17 00:00:00 2001 From: yinbin Date: Fri, 17 Jan 2025 10:59:08 +0800 Subject: [PATCH 1/6] loopback: encapsulation dpdk eth --- src/lstack/Makefile | 2 +- src/lstack/core/lstack_init.c | 2 + src/lstack/core/lstack_protocol_stack.c | 4 +- src/lstack/include/lstack_ethdev.h | 18 + src/lstack/include/lstack_protocol_stack.h | 3 + src/lstack/netif/lstack_ethdev.c | 416 ++++++++++++++++++--- 6 files changed, 387 insertions(+), 58 deletions(-) diff --git a/src/lstack/Makefile b/src/lstack/Makefile index 8a659c2..ca1cf22 100644 --- a/src/lstack/Makefile +++ b/src/lstack/Makefile @@ -27,7 +27,7 @@ CC ?= gcc RM = rm -f ARCH := $(shell uname -m) -CFLAGS = -O2 -g +CFLAGS = -O2 -g -Wno-unused-variable LDFLAGS = -shared -ldl -lm -lpthread -lrt -lnuma -lconfig -lboundscheck ifeq ($(GAZELLE_COVERAGE_ENABLE), 1) diff --git a/src/lstack/core/lstack_init.c b/src/lstack/core/lstack_init.c index 29796ee..451d705 100644 --- a/src/lstack/core/lstack_init.c +++ b/src/lstack/core/lstack_init.c @@ -254,6 +254,7 @@ __attribute__((constructor)) void gazelle_network_init(void) LSTACK_PRE_LOG(LSTACK_INFO, "cfg_init success\n"); wrap_api_init(); + if (set_rlimit_unlimited() != 0) { LSTACK_PRE_LOG(LSTACK_WARNING, "set rlimit unlimited failed. errno=%d\n", errno); @@ -318,6 +319,7 @@ __attribute__((constructor)) void gazelle_network_init(void) } } + eth_user_init(); if (!get_global_cfg_params()->stack_mode_rtc) { if (stack_setup_thread() != 0) { gazelle_exit(); diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c index fcc0ad7..d9f9146 100644 --- a/src/lstack/core/lstack_protocol_stack.c +++ b/src/lstack/core/lstack_protocol_stack.c @@ -535,7 +535,9 @@ int stack_polling(unsigned wakeup_tick) rpc_poll_msg(&stack->dfx_rpc_queue, 2); force_quit = rpc_poll_msg(&stack->rpc_queue, rpc_number); - eth_dev_poll(); + //eth_dev_poll(); + stack->eth_user->linkinput(stack->eth_user, stack->queue_id); + timeout = sys_timer_run(); if (cfg->stack_interrupt) { intr_wait(stack->stack_idx, timeout); diff --git a/src/lstack/include/lstack_ethdev.h b/src/lstack/include/lstack_ethdev.h index 0c3d906..bdd22be 100644 --- a/src/lstack/include/lstack_ethdev.h +++ b/src/lstack/include/lstack_ethdev.h @@ -33,5 +33,23 @@ void kni_handle_tx(struct rte_mbuf *mbuf); #endif void netif_poll(struct netif *netif); +/////////////////////////// loopback add ////////////////// +// 定义网卡结构 +struct ethdev { + int port_id; + int queue_num; + int netif_idx; + + int (*linkinput)(struct ethdev *dev, int queue_id); + int (*input)(struct ethdev *dev, struct pbuf *p); + + int (*output)(struct ethdev *dev, int queue_id, struct pbuf *p); + int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); +}; + +void eth_user_init(void); + + +///////////////////////// /////////////// //////////////// #endif /* __GAZELLE_ETHDEV_H__ */ diff --git a/src/lstack/include/lstack_protocol_stack.h b/src/lstack/include/lstack_protocol_stack.h index c9c50c9..ba84316 100644 --- a/src/lstack/include/lstack_protocol_stack.h +++ b/src/lstack/include/lstack_protocol_stack.h @@ -69,6 +69,9 @@ struct protocol_stack { struct netif netif; struct lstack_dev_ops dev_ops; + struct ethdev *eth_user; + struct ethdev *eth_virtio; + struct ethdev *eth_loopback; uint32_t rx_ring_used; uint32_t tx_ring_used; diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index 315cced..4da6318 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ 
b/src/lstack/netif/lstack_ethdev.c @@ -43,6 +43,19 @@ #define MBUF_MAX_LEN 1514 #define PACKET_READ_SIZE 32 +//////////////////////////////////////loopback/////////////// +static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); + +#define ETHDEV_MEMZONE_NAME "gazelle_ethdev_memzone_0001" +static struct ethdev *g_eth_user; +///////////////////////////////////////////////////////////// + + + + + + + /* any protocol stack thread receives arp packet and sync it to other threads, * so that it can have the arp table */ static void stack_broadcast_arp(struct rte_mbuf *mbuf, struct protocol_stack *cur_stack) @@ -287,61 +300,61 @@ int32_t eth_dev_poll(void) return nr_pkts; } -static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) -{ - struct protocol_stack *stack = get_protocol_stack(); - struct rte_mbuf *pre_mbuf = NULL; - struct rte_mbuf *first_mbuf = NULL; - void *buf_addr; - - while (likely(pbuf != NULL)) { - struct rte_mbuf *mbuf = pbuf_to_mbuf(pbuf); - - mbuf->data_len = pbuf->len; - mbuf->pkt_len = pbuf->tot_len; - mbuf->next = NULL; - buf_addr = rte_pktmbuf_mtod(mbuf, void *); - - /* - * |rte_mbuf | mbuf_private | data_off | data | - * ^ ^ - * buf_addr payload - * m->buf_addr pointer pbuf->payload - */ - mbuf->data_off += (uint8_t *)pbuf->payload - (uint8_t *)buf_addr; - - if (first_mbuf == NULL) { - first_mbuf = mbuf; - first_mbuf->nb_segs = 1; - } else { - first_mbuf->nb_segs++; - pre_mbuf->next = mbuf; - } - - if (likely(first_mbuf->pkt_len > MBUF_MAX_LEN)) { - mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG; - mbuf->tso_segsz = MBUF_MAX_DATA_LEN; - } - - pre_mbuf = mbuf; - rte_mbuf_refcnt_update(mbuf, 1); - - if (get_protocol_stack_group()->latency_start) { - calculate_lstack_latency(&stack->latency, pbuf, GAZELLE_LATENCY_WRITE_LSTACK, 0); - } - pbuf = pbuf->next; - } - - uint32_t sent_pkts = stack->dev_ops.tx_xmit(stack, &first_mbuf, 1); - stack->stats.tx += sent_pkts; - if (sent_pkts < 1) { - stack->stats.tx_drop++; - rte_pktmbuf_free(first_mbuf); - return ERR_MEM; - } - - return ERR_OK; -} +///////////////////////////static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) +///////////////////////////{ +/////////////////////////// struct protocol_stack *stack = get_protocol_stack(); +/////////////////////////// struct rte_mbuf *pre_mbuf = NULL; +/////////////////////////// struct rte_mbuf *first_mbuf = NULL; +/////////////////////////// void *buf_addr; +/////////////////////////// +/////////////////////////// while (likely(pbuf != NULL)) { +/////////////////////////// struct rte_mbuf *mbuf = pbuf_to_mbuf(pbuf); +/////////////////////////// +/////////////////////////// mbuf->data_len = pbuf->len; +/////////////////////////// mbuf->pkt_len = pbuf->tot_len; +/////////////////////////// mbuf->next = NULL; +/////////////////////////// buf_addr = rte_pktmbuf_mtod(mbuf, void *); +/////////////////////////// +/////////////////////////// /* +/////////////////////////// * |rte_mbuf | mbuf_private | data_off | data | +/////////////////////////// * ^ ^ +/////////////////////////// * buf_addr payload +/////////////////////////// * m->buf_addr pointer pbuf->payload +/////////////////////////// */ +/////////////////////////// mbuf->data_off += (uint8_t *)pbuf->payload - (uint8_t *)buf_addr; +/////////////////////////// +/////////////////////////// if (first_mbuf == NULL) { +/////////////////////////// first_mbuf = mbuf; +/////////////////////////// first_mbuf->nb_segs = 1; +/////////////////////////// } else { +/////////////////////////// first_mbuf->nb_segs++; 
+/////////////////////////// pre_mbuf->next = mbuf; +/////////////////////////// } +/////////////////////////// +/////////////////////////// if (likely(first_mbuf->pkt_len > MBUF_MAX_LEN)) { +/////////////////////////// mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG; +/////////////////////////// mbuf->tso_segsz = MBUF_MAX_DATA_LEN; +/////////////////////////// } +/////////////////////////// +/////////////////////////// pre_mbuf = mbuf; +/////////////////////////// rte_mbuf_refcnt_update(mbuf, 1); +/////////////////////////// +/////////////////////////// if (get_protocol_stack_group()->latency_start) { +/////////////////////////// calculate_lstack_latency(&stack->latency, pbuf, GAZELLE_LATENCY_WRITE_LSTACK, 0); +/////////////////////////// } +/////////////////////////// pbuf = pbuf->next; +/////////////////////////// } +/////////////////////////// +/////////////////////////// uint32_t sent_pkts = stack->dev_ops.tx_xmit(stack, &first_mbuf, 1); +/////////////////////////// stack->stats.tx += sent_pkts; +/////////////////////////// if (sent_pkts < 1) { +/////////////////////////// stack->stats.tx_drop++; +/////////////////////////// rte_pktmbuf_free(first_mbuf); +/////////////////////////// return ERR_MEM; +/////////////////////////// } +/////////////////////////// +/////////////////////////// return ERR_OK; +///////////////////////////} static err_t eth_dev_init(struct netif *netif) { @@ -383,7 +396,8 @@ int32_t ethdev_init(struct protocol_stack *stack) { struct cfg_params *cfg = get_global_cfg_params(); int ret = 0; - + + stack->eth_user = g_eth_user; vdev_dev_ops_init(&stack->dev_ops); if (cfg->send_cache_mode) { ret = tx_cache_init(stack->queue_id, stack, &stack->dev_ops); @@ -434,3 +448,293 @@ int32_t ethdev_init(struct protocol_stack *stack) return 0; } + +//////////////////////////////////////// new eth //////////////////////////////// + +//// 定义网卡结构 +//struct ethdev { +// int port_id; +// int queue_num; +// int netif_idx; +// +// int (*linkinput)(struct ethdev *dev, int queue_id); +// int (*input)(struct ethdev *dev, struct pbuf *p); +// +// int (*output)(struct ethdev *dev, queue_id, struct pbuf *p); +// int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], int num); +//}; +// +//int ethdev_create(void); +//void ethdev_destroy(int port_id); +//struct ethdev *ethdev_get(int port_id); + + +struct ethdev *get_g_eth_user() +{ + return g_eth_user; +} + + +enum PACKET_TRANSFER_TYPE get_packet_transfer_type(struct rte_mbuf *pkt, bool *is_arp) +{ + enum PACKET_TRANSFER_TYPE transfer_type = TRANSFER_CURRENT_THREAD; + + if (use_ltran()) { + return transfer_type; + } + + if (unlikely(IS_ARP_PKT(pkt->packet_type)) || + unlikely(IS_ICMPV6_PKT(pkt->packet_type))) { + *is_arp = true; + return transfer_type; + } + + if (get_global_cfg_params()->tuple_filter && get_protocol_stack()->queue_id == 0) { + transfer_type = distribute_pakages(pkt); + } + + if (get_global_cfg_params()->flow_bifurcation) { + uint16_t dst_port = eth_dev_get_dst_port(pkt); + if (virtio_distribute_pkg_to_kernel(dst_port)) { + transfer_type = TRANSFER_KERNEL; + } + } + + return transfer_type; +} + +struct pbuf *generate_pbuf_from_mbuf(struct rte_mbuf *mbuf) +{ + void *payload = NULL; + struct pbuf *next = NULL; + struct pbuf *prev = NULL; + struct pbuf *head = NULL; + struct pbuf_custom *pc = NULL; + struct rte_mbuf *m = mbuf; + uint16_t len, pkt_len; + struct rte_mbuf *next_m = NULL; + + pkt_len = (uint16_t)rte_pktmbuf_pkt_len(m); + + while (m != NULL) { + len = (uint16_t)rte_pktmbuf_data_len(m); + payload = 
rte_pktmbuf_mtod(m, void *); + pc = mbuf_to_pbuf(m); + next = pbuf_alloced_custom(PBUF_RAW, (uint16_t)len, PBUF_RAM, pc, payload, (uint16_t)len); + if (next == NULL) { + get_protocol_stack()->stats.rx_allocmbuf_fail++; + break; + } + next->tot_len = pkt_len; + pkt_len -= len; + + if (head == NULL) { + head = next; + } + if (prev != NULL) { + prev->next = next; + } + prev = next; + + next_m = m->next; + m->next = NULL; + m = next_m; + } + + return head; +} + +int eth_user_input(struct ethdev *dev, struct pbuf *pbuf_input) +{ + int32_t ret = -1; + struct protocol_stack *stack; + + stack = get_protocol_stack(); + if (pbuf_input != NULL) { + ret = stack->netif.input(pbuf_input, &stack->netif); + if (ret != ERR_OK) { + LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); + stack->stats.rx_drop++; + } + } + return ret; +} + +//int32_t eth_dev_poll(void) +int eth_user_linkinput(struct ethdev *dev, int queue_id) +{ + uint32_t nr_pkts; + int transfer_type; /* 1 current thread recv; 0 other thread recv; -1 kni recv; */ + bool is_arp =false; + struct pbuf *pbuf_input; + struct cfg_params *cfg; + struct protocol_stack *stack; + + cfg = get_global_cfg_params(); + stack = get_protocol_stack(); + + nr_pkts = stack->dev_ops.rx_poll(stack, stack->pkts, cfg->nic_read_number); + if (nr_pkts == 0) { + return 0; + } + + if (!use_ltran() && get_protocol_stack_group()->latency_start) { + uint64_t time_stamp = sys_now_us(); + time_stamp_into_mbuf(nr_pkts, stack->pkts, time_stamp); + } + + for (uint32_t i = 0; i < nr_pkts; i++) { + is_arp = false; + transfer_type = get_packet_transfer_type(stack->pkts[i], &is_arp); + + if (likely(transfer_type == TRANSFER_CURRENT_THREAD)) { + if (is_arp) { + stack_broadcast_arp(stack->pkts[i], stack); + /* copy arp into other process */ + transfer_arp_to_other_process(stack->pkts[i]); + } + + pbuf_input = generate_pbuf_from_mbuf(stack->pkts[i]); + dev->input(dev, pbuf_input);//eth_user_input() + } else if (transfer_type == TRANSFER_KERNEL) { + if (get_global_cfg_params()->flow_bifurcation) { + virtio_tap_process_tx(stack->queue_id, stack->pkts[i]); + } else { +#if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) + kni_handle_tx(stack->pkts[i]); +#else + rte_pktmbuf_free(stack->pkts[i]); +#endif + } + } else { + /* transfer to other thread */ + } + } + + stack->stats.rx += nr_pkts; + + return nr_pkts; +} + +struct rte_mbuf *generate_mbuf_from_pbuf(struct pbuf *pbuf) +{ + struct rte_mbuf *pre_mbuf = NULL; + struct rte_mbuf *first_mbuf = NULL; + void *buf_addr; + + while (likely(pbuf != NULL)) { + struct rte_mbuf *mbuf = pbuf_to_mbuf(pbuf); + + mbuf->data_len = pbuf->len; + mbuf->pkt_len = pbuf->tot_len; + mbuf->next = NULL; + buf_addr = rte_pktmbuf_mtod(mbuf, void *); + + /* + * |rte_mbuf | mbuf_private | data_off | data | + * ^ ^ + * buf_addr payload + * m->buf_addr pointer pbuf->payload + */ + mbuf->data_off += (uint8_t *)pbuf->payload - (uint8_t *)buf_addr; + + if (first_mbuf == NULL) { + first_mbuf = mbuf; + first_mbuf->nb_segs = 1; + } else { + first_mbuf->nb_segs++; + pre_mbuf->next = mbuf; + } + + if (likely(first_mbuf->pkt_len > MBUF_MAX_LEN)) { + mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG; + mbuf->tso_segsz = MBUF_MAX_DATA_LEN; + } + + pre_mbuf = mbuf; + rte_mbuf_refcnt_update(mbuf, 1); + + if (get_protocol_stack_group()->latency_start) { + calculate_lstack_latency(&get_protocol_stack()->latency, pbuf, GAZELLE_LATENCY_WRITE_LSTACK, 0); + } + pbuf = pbuf->next; + } + + return first_mbuf; +} + +//uint32_t sent_pkts = stack->dev_ops.tx_xmit(stack, 
&first_mbuf, 1); +int eth_user_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf **pkts, uint32_t nr_pkts) +{ + struct protocol_stack *stack; + uint32_t sent_pkts; + + stack = get_protocol_stack(); + sent_pkts = stack->dev_ops.tx_xmit(stack, pkts, 1); + return sent_pkts; +} + +int eth_user_output(struct ethdev *dev, int queue_id, struct pbuf *p) +{ + uint32_t sent_pkts; + struct protocol_stack *stack; + struct rte_mbuf *mbuf_output; + + stack = get_protocol_stack(); + mbuf_output = generate_mbuf_from_pbuf(p); + + sent_pkts = stack->eth_user->linkoutput(stack->eth_user, stack->queue_id, &mbuf_output, 1); + if (sent_pkts < 1) { + rte_pktmbuf_free(mbuf_output); + } + return sent_pkts; +} + +static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) +{ + struct ethdev *dev; + struct protocol_stack *stack; + uint32_t sent_pkts; + + stack = get_protocol_stack(); + dev = stack->eth_user; + sent_pkts = dev->output(dev, stack->queue_id, pbuf); + stack->stats.tx += sent_pkts; + if (sent_pkts < 1) { + stack->stats.tx_drop++; + return ERR_MEM; + } + return ERR_OK; +} + + + +void eth_user_init(void) +{ + const struct rte_memzone *memzone; + struct ethdev *eth_user; + + if (get_global_cfg_params()->is_primary) { + memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev), rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); + if (memzone == NULL) { + LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); + } + eth_user = (struct ethdev*)(memzone->addr); + memset_s(eth_user, sizeof(struct ethdev), 0, sizeof(struct ethdev)); + + eth_user->linkinput = eth_user_linkinput; + eth_user->input = eth_user_input; + eth_user->linkoutput = eth_user_linkoutput; + eth_user->output = eth_user_output; + g_eth_user = eth_user; + } +// else { +// struct ethdev *eth_user; +// u64_t init_id = 0; +// memzone = rte_memzone_lookup(ETHDEV_MEMZONE_NAME); +// if (memzone == NULL) { +// LSTACK_EXIT(EXIT_FAILURE, "%s: Cannot get ethdev_table, have you started up the primary process?\n", __func__); +// } +// eth_user = (struct ethdev*)(memzone->addr); +// } +} -- Gitee From ab23fad5975ec0333949be3c21794da56ededa3c Mon Sep 17 00:00:00 2001 From: yinbin Date: Sat, 18 Jan 2025 16:11:41 +0800 Subject: [PATCH 2/6] virtio dev modify --- src/lstack/core/lstack_init.c | 2 +- src/lstack/core/lstack_protocol_stack.c | 3 +- src/lstack/core/lstack_virtio.c | 60 ++++----- src/lstack/include/lstack_ethdev.h | 14 +- src/lstack/netif/lstack_ethdev.c | 166 +++++++++++++++++++++--- 5 files changed, 197 insertions(+), 48 deletions(-) diff --git a/src/lstack/core/lstack_init.c b/src/lstack/core/lstack_init.c index 451d705..a5a0e47 100644 --- a/src/lstack/core/lstack_init.c +++ b/src/lstack/core/lstack_init.c @@ -319,7 +319,7 @@ __attribute__((constructor)) void gazelle_network_init(void) } } - eth_user_init(); + eths_init(); if (!get_global_cfg_params()->stack_mode_rtc) { if (stack_setup_thread() != 0) { gazelle_exit(); diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c index d9f9146..28a0f21 100644 --- a/src/lstack/core/lstack_protocol_stack.c +++ b/src/lstack/core/lstack_protocol_stack.c @@ -585,7 +585,8 @@ int stack_polling(unsigned wakeup_tick) } #endif if (get_global_cfg_params()->flow_bifurcation) { - virtio_tap_process_rx(stack->port_id, stack->queue_id); + stack->eth_virtio->linkinput(stack->eth_virtio, stack->queue_id); + //virtio_tap_process_rx(stack->port_id, stack->queue_id); } return force_quit; } diff --git 
a/src/lstack/core/lstack_virtio.c b/src/lstack/core/lstack_virtio.c index 75a23f2..42a0819 100644 --- a/src/lstack/core/lstack_virtio.c +++ b/src/lstack/core/lstack_virtio.c @@ -213,36 +213,36 @@ static int virtio_cfg_ip(void) return 0; } -void virtio_tap_process_rx(uint16_t port, uint32_t queue_id) -{ - struct rte_mbuf *pkts_burst[VIRTIO_TX_RX_RING_SIZE]; - uint16_t lstack_net_port = port; - uint32_t pkg_num; - - pkg_num = rte_eth_rx_burst(g_virtio_instance.virtio_port_id, queue_id, pkts_burst, VIRTIO_TX_RX_RING_SIZE); - /* - * For VLAN, the tap device defaults to tx-vlan-ofload as enabled and will not be modified by default, - * so the judgment is skipped. - * For checksum, tap devices are turned off by default, and it is assumed that they will not be modified, - * so no action will be taken. - * For TSO, tap devices do not support it, so no action will be taken. - */ - if (get_global_cfg_params()->vlan_mode != -1) { - for (int i = 0; i< pkg_num; i++) { - pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_VLAN; - pkts_burst[i]->vlan_tci = (u16_t)get_global_cfg_params()->vlan_mode; - } - } - - if (pkg_num > 0) { - g_virtio_instance.rx_pkg[queue_id] += pkg_num; - uint16_t nb_rx = rte_eth_tx_burst(lstack_net_port, queue_id, pkts_burst, pkg_num); - for (uint16_t i = nb_rx; i < pkg_num; ++i) { - rte_pktmbuf_free(pkts_burst[i]); - g_virtio_instance.rx_drop[queue_id]++; - } - } -} +/////////////////////////////////////////////////void virtio_tap_process_rx(uint16_t port, uint32_t queue_id) +/////////////////////////////////////////////////{ +///////////////////////////////////////////////// struct rte_mbuf *pkts_burst[VIRTIO_TX_RX_RING_SIZE]; +///////////////////////////////////////////////// uint16_t lstack_net_port = port; +///////////////////////////////////////////////// uint32_t pkg_num; +///////////////////////////////////////////////// +///////////////////////////////////////////////// pkg_num = rte_eth_rx_burst(g_virtio_instance.virtio_port_id, queue_id, pkts_burst, VIRTIO_TX_RX_RING_SIZE); +///////////////////////////////////////////////// /* +///////////////////////////////////////////////// * For VLAN, the tap device defaults to tx-vlan-ofload as enabled and will not be modified by default, +///////////////////////////////////////////////// * so the judgment is skipped. +///////////////////////////////////////////////// * For checksum, tap devices are turned off by default, and it is assumed that they will not be modified, +///////////////////////////////////////////////// * so no action will be taken. +///////////////////////////////////////////////// * For TSO, tap devices do not support it, so no action will be taken. 
+///////////////////////////////////////////////// */ +///////////////////////////////////////////////// if (get_global_cfg_params()->vlan_mode != -1) { +///////////////////////////////////////////////// for (int i = 0; i< pkg_num; i++) { +///////////////////////////////////////////////// pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_VLAN; +///////////////////////////////////////////////// pkts_burst[i]->vlan_tci = (u16_t)get_global_cfg_params()->vlan_mode; +///////////////////////////////////////////////// } +///////////////////////////////////////////////// } +///////////////////////////////////////////////// +///////////////////////////////////////////////// if (pkg_num > 0) { +///////////////////////////////////////////////// g_virtio_instance.rx_pkg[queue_id] += pkg_num; +///////////////////////////////////////////////// uint16_t nb_rx = rte_eth_tx_burst(lstack_net_port, queue_id, pkts_burst, pkg_num); +///////////////////////////////////////////////// for (uint16_t i = nb_rx; i < pkg_num; ++i) { +///////////////////////////////////////////////// rte_pktmbuf_free(pkts_burst[i]); +///////////////////////////////////////////////// g_virtio_instance.rx_drop[queue_id]++; +///////////////////////////////////////////////// } +///////////////////////////////////////////////// } +/////////////////////////////////////////////////} void virtio_tap_process_tx(uint16_t queue_id, struct rte_mbuf *mbuf_copy) { diff --git a/src/lstack/include/lstack_ethdev.h b/src/lstack/include/lstack_ethdev.h index bdd22be..1d2cdf6 100644 --- a/src/lstack/include/lstack_ethdev.h +++ b/src/lstack/include/lstack_ethdev.h @@ -35,6 +35,13 @@ void kni_handle_tx(struct rte_mbuf *mbuf); void netif_poll(struct netif *netif); /////////////////////////// loopback add ////////////////// // 定义网卡结构 +enum { + ETH_USER_IDX = 0, + ETH_VIRTIO_IDX, + ETH_LOOPBACK_IDX, + ETH_COUNT, +}; + struct ethdev { int port_id; int queue_num; @@ -47,8 +54,13 @@ struct ethdev { int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); }; +struct ethdev_array { + struct ethdev eth[ETH_COUNT]; +}; +void eths_init(void); +void eth_virtio_init(void); void eth_user_init(void); - +void virtio_tap_process_rx(uint16_t port, uint32_t queue_id); ///////////////////////// /////////////// //////////////// diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index 4da6318..a6bdd3b 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ b/src/lstack/netif/lstack_ethdev.c @@ -47,12 +47,20 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); #define ETHDEV_MEMZONE_NAME "gazelle_ethdev_memzone_0001" -static struct ethdev *g_eth_user; -///////////////////////////////////////////////////////////// - - +#define VIRTIO_TX_RX_RING_SIZE 1024 +//enum { +// ETH_USER_IDX = 0, +// ETH_VIRTIO_IDX, +// ETH_LOOPBACK_IDX, +// ETH_COUNT, +//}; +//struct ethdev *g_eth_table[ETH_COUNT]; +struct ethdev_array *g_eth_arr; +static struct ethdev *g_eth_user; +static struct ethdev *g_eth_virtio; +///////////////////////////////////////////////////////////// @@ -398,6 +406,7 @@ int32_t ethdev_init(struct protocol_stack *stack) int ret = 0; stack->eth_user = g_eth_user; + stack->eth_virtio = g_eth_virtio; vdev_dev_ops_init(&stack->dev_ops); if (cfg->send_cache_mode) { ret = tx_cache_init(stack->queue_id, stack, &stack->dev_ops); @@ -598,7 +607,7 @@ int eth_user_linkinput(struct ethdev *dev, int queue_id) dev->input(dev, pbuf_input);//eth_user_input() } else if (transfer_type == TRANSFER_KERNEL) { if 
(get_global_cfg_params()->flow_bifurcation) { - virtio_tap_process_tx(stack->queue_id, stack->pkts[i]); + g_eth_virtio->linkoutput(g_eth_virtio, stack->queue_id, &stack->pkts[i], 1); } else { #if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) kni_handle_tx(stack->pkts[i]); @@ -709,24 +718,148 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) + + void eth_user_init(void) +{ + struct protocol_stack_group *stack_group; + + stack_group = get_protocol_stack_group(); + + g_eth_user = &g_eth_arr->eth[ETH_USER_IDX]; + g_eth_user->linkinput = eth_user_linkinput; + g_eth_user->input = eth_user_input; + g_eth_user->linkoutput = eth_user_linkoutput; + g_eth_user->output = eth_user_output; + + g_eth_user->port_id = stack_group->port_id; + +} + + +////////////////////////////////////////////////////////////////////////////// + + + + +///////////////////////////////////// eth_vitio ///////////////////////////////// + +uint32_t virtio_tx_xmit(int port_id, int queue_id, struct rte_mbuf **pkts, uint32_t nr_pkts) +{ + uint32_t sent_pkts = 0; + + if (rte_eth_tx_prepare(port_id, queue_id, pkts, nr_pkts) != nr_pkts) { + // stack->stats.tx_prepare_fail++; + LSTACK_LOG(INFO, LSTACK, "rte_eth_tx_prepare failed\n"); + } + const uint32_t tbegin = sys_now(); + + do { + sent_pkts += rte_eth_tx_burst(port_id, queue_id, &pkts[sent_pkts], nr_pkts - sent_pkts); + } while (sent_pkts < nr_pkts && (ENQUEUE_RING_RETRY_TIMEOUT > sys_now() - tbegin)); + + return sent_pkts; +} + +//int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); +int eth_virtio_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num) +{ + int pkts = 0; + int send_suc = 0; + struct virtio_instance* virtio_inst; + + virtio_inst = virtio_instance_get(); + + //pkts = rte_eth_tx_burst(g_virtio_instance.virtio_port_id, queue_id, mbufs, num); + for (int i = 0; i < num; ++i) { + send_suc = virtio_tx_xmit(dev->port_id, queue_id, &mbufs[i], 1); + if (send_suc > 0) { + pkts++; + } else { + rte_pktmbuf_free(mbufs[i]); + virtio_inst->tx_drop[queue_id]++; + LSTACK_LOG(ERR, LSTACK, "virtio_tap_process_tx failed %d, %d\n", queue_id, pkts); + } + } + virtio_inst->tx_pkg[queue_id] += pkts ; + + return pkts; +} + +int eth_virtio_linkinput(struct ethdev *dev, int queue_id) +{ + struct rte_mbuf *mbufs_input[VIRTIO_TX_RX_RING_SIZE]; + int pkts = 0; + struct virtio_instance* virtio_inst; + + virtio_inst = virtio_instance_get(); + + pkts = rte_eth_rx_burst(dev->port_id, queue_id, mbufs_input, VIRTIO_TX_RX_RING_SIZE); + + /* + * For VLAN, the tap device defaults to tx-vlan-ofload as enabled and will not be modified by default, + * so the judgment is skipped. + * For checksum, tap devices are turned off by default, and it is assumed that they will not be modified, + * so no action will be taken. + * For TSO, tap devices do not support it, so no action will be taken. 
+ */ + if (get_global_cfg_params()->vlan_mode != -1) { + for (int i = 0; i < pkts; ++i) { + mbufs_input[i]->ol_flags |= RTE_MBUF_F_TX_VLAN; + mbufs_input[i]->vlan_tci = (u16_t)get_global_cfg_params()->vlan_mode; + } + } + + if (pkts <= 0) { + return pkts; + } + + virtio_inst->rx_pkg[queue_id] += pkts; + + g_eth_virtio->linkoutput(g_eth_user, queue_id, mbufs_input, pkts); + + return pkts; +} + +//void virtio_tap_process_rx(uint16_t port, uint32_t queue_id) +//{ +// g_eth_virtio->linkinput(g_eth_virtio, queue_id); +// +// if (pkg_num > 0) { +// virtio_inst->rx_pkg[queue_id] += pkg_num; +// g_eth_virtio->linkoutput(g_eth_user, queue_id, pkts_burst, pkg_num); +// } +//} + +void eth_virtio_init(void) +{ + struct virtio_instance* virtio_inst; + + virtio_inst = virtio_instance_get(); + + g_eth_virtio = &g_eth_arr->eth[ETH_VIRTIO_IDX]; + + g_eth_virtio->linkinput = eth_virtio_linkinput; + //g_eth_virtio->input = eth_virtio_input; + g_eth_virtio->linkoutput = eth_virtio_linkoutput; + //g_eth_virtio->output = eth_virtio_output; + g_eth_virtio->port_id = virtio_inst->virtio_port_id; + +} + + +////////////////////////////////////////////////////////////////// +void eths_init(void) { const struct rte_memzone *memzone; - struct ethdev *eth_user; if (get_global_cfg_params()->is_primary) { - memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev), rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); + memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); if (memzone == NULL) { LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); } - eth_user = (struct ethdev*)(memzone->addr); - memset_s(eth_user, sizeof(struct ethdev), 0, sizeof(struct ethdev)); - - eth_user->linkinput = eth_user_linkinput; - eth_user->input = eth_user_input; - eth_user->linkoutput = eth_user_linkoutput; - eth_user->output = eth_user_output; - g_eth_user = eth_user; + g_eth_arr = (struct ethdev_array*)(memzone->addr); + memset_s(g_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); } // else { // struct ethdev *eth_user; @@ -737,4 +870,7 @@ void eth_user_init(void) // } // eth_user = (struct ethdev*)(memzone->addr); // } + eth_user_init(); + eth_virtio_init(); } +////////////////////////////////////////////////////////////////// -- Gitee From 95e987fc218270853b8a2e35de9acb589c81750f Mon Sep 17 00:00:00 2001 From: jiangheng Date: Tue, 25 Feb 2025 15:07:23 +0800 Subject: [PATCH 3/6] loopback: add loopback_ring and some interface --- src/lstack/include/lstack_ethdev.h | 25 ++- src/lstack/include/lstack_protocol_stack.h | 1 + src/lstack/netif/lstack_ethdev.c | 233 ++++++++++++++++++++- 3 files changed, 249 insertions(+), 10 deletions(-) diff --git a/src/lstack/include/lstack_ethdev.h b/src/lstack/include/lstack_ethdev.h index 1d2cdf6..185c8df 100644 --- a/src/lstack/include/lstack_ethdev.h +++ b/src/lstack/include/lstack_ethdev.h @@ -44,7 +44,7 @@ enum { struct ethdev { int port_id; - int queue_num; + int queue_idx; int netif_idx; int (*linkinput)(struct ethdev *dev, int queue_id); @@ -54,9 +54,30 @@ struct ethdev { int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); }; +struct ethdev_user { + struct ethdev dev; + int queue_cnt_max; + int queue_idx_next; // means new thread get the queue index; +}; + +struct ethdev_virtio { + struct ethdev dev; +}; + +struct ethdev_loopback { + struct ethdev dev; + int queue_idx_next; // means new thread get the queue index; +}; 
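/*
 * struct ethdev above is effectively a small ops table: each backend (user NIC
 * queue, virtio/tap, loopback) fills in its own linkinput/input/output/linkoutput
 * and the polling loop only talks to the generic interface. Below is a minimal
 * sketch of that dispatch, assuming only the struct layout defined above; the
 * devs[] array, dev_cnt and queue_id names are placeholders, not code from this
 * patch.
 */
static inline void poll_all_ethdevs(struct ethdev *devs[], int dev_cnt, int queue_id)
{
    for (int i = 0; i < dev_cnt; i++) {
        struct ethdev *dev = devs[i];
        if (dev == NULL || dev->linkinput == NULL) {
            continue;
        }
        /* rx: linkinput() bursts raw frames for this queue and hands each one,
         * wrapped as a pbuf, to dev->input(); tx goes the other way through
         * dev->output() -> dev->linkoutput(). */
        dev->linkinput(dev, queue_id);
    }
}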
+ struct ethdev_array { - struct ethdev eth[ETH_COUNT]; + struct ethdev dev[ETH_COUNT]; }; + +//struct ethdev_array { +// struct ethdev_user eth_user; +// struct ethdev_virtio eth_virtio; +// struct ethdev_loopback eth_loopback; +//}; void eths_init(void); void eth_virtio_init(void); void eth_user_init(void); diff --git a/src/lstack/include/lstack_protocol_stack.h b/src/lstack/include/lstack_protocol_stack.h index ba84316..63a8f5b 100644 --- a/src/lstack/include/lstack_protocol_stack.h +++ b/src/lstack/include/lstack_protocol_stack.h @@ -68,6 +68,7 @@ struct protocol_stack { char pad3 __rte_cache_aligned; struct netif netif; + struct netif netif_loopback; struct lstack_dev_ops dev_ops; struct ethdev *eth_user; struct ethdev *eth_virtio; diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index a6bdd3b..433bb47 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ b/src/lstack/netif/lstack_ethdev.c @@ -46,8 +46,12 @@ //////////////////////////////////////loopback/////////////// static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); -#define ETHDEV_MEMZONE_NAME "gazelle_ethdev_memzone_0001" +#define ETHDEV_MEMZONE_NAME "gazelle_shared_ethdevs_mem" #define VIRTIO_TX_RX_RING_SIZE 1024 + +//#define RTE_MAX_ETHPORTS 32 +#define LOOPBACK_PORT_ID 33 // "#define RTE_MAX_ETHPORTS 32", Loopback will not occupy dpdk port. +#define LOOPBACK_RING_SIZE 256 //enum { // ETH_USER_IDX = 0, // ETH_VIRTIO_IDX, @@ -55,7 +59,9 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); // ETH_COUNT, //}; //struct ethdev *g_eth_table[ETH_COUNT]; -struct ethdev_array *g_eth_arr; +struct ethdev_array *g_shared_eth_arr; // shared between multi-process. +static PER_THREAD struct ethdev_array g_eth_arr; //use in curr process; +static struct rte_ring *g_loopback_ring[PROTOCOL_STACK_MAX]; //use in curr process; static struct ethdev *g_eth_user; static struct ethdev *g_eth_virtio; @@ -400,6 +406,68 @@ static err_t eth_dev_init(struct netif *netif) return ERR_OK; } +////////////////////////////////////////////// + +err_t +ethloopback_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + return 0; +} + +err_t +ethloopback_ip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ipaddr) +{ + return 0; +} + +static err_t ethloopback_dev_output(struct netif *netif, struct pbuf *pbuf) +{ + return 0; +} + + +static err_t eth_loopback_dev_init(struct netif *netif) +{ + int ret; + //const u8_t loopback_haddr[] = "00:00:00:00:00:00"; + const uint8_t loopback_haddr[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; + + netif->name[0] = 'l'; + netif->name[1] = 'o'; + netif->flags |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6; + netif->mtu = FRAME_MTU; + netif->output = ethloopback_output; + netif->linkoutput = ethloopback_dev_output; + netif->output_ip6 = ethloopback_ip6_output; + + ret = memcpy_s(netif->hwaddr, sizeof(netif->hwaddr), loopback_haddr, ETHER_ADDR_LEN); + if (ret != EOK) { + LSTACK_LOG(ERR, LSTACK, "memcpy_s fail ret=%d\n", ret); + return ERR_MEM; + } + + netif->hwaddr_len = ETHER_ADDR_LEN; + + if (dpdk_nic_is_xdp()) { + netif_set_rxol_flags(netif, RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_IPV4_CKSUM); + } else { + netif_set_rxol_flags(netif, get_protocol_stack_group()->rx_offload); + } + netif_set_txol_flags(netif, get_protocol_stack_group()->tx_offload); + if (get_global_cfg_params()->stack_mode_rtc) { + netif_set_rtc_mode(netif); + } + + return ERR_OK; +} + + + 
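/*
 * Everything the loopback path shares between gazelle processes follows DPDK's
 * primary/secondary convention: the primary process creates named objects and
 * secondary processes attach to them by name, as loopback_ring_init() below does
 * for the per-queue rings and as eths_init() does on the primary side for the
 * shared ethdev memzone. A minimal, self-contained sketch of that pattern; the
 * ring name format and the 256-slot size follow the patch (LOOPBACK_RING_SIZE),
 * while the helper itself, its use of rte_eal_process_type() and the error
 * handling are assumptions for illustration only.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring.h>

static struct rte_ring *loopback_ring_get(int queue_id)
{
    char name[RTE_RING_NAMESIZE];
    struct rte_ring *ring;

    snprintf(name, sizeof(name), "loopback_queue_%d", queue_id);
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        /* primary owns the ring; single-consumer to match RING_F_SC_DEQ in the
         * patch, and the count must be a power of two */
        ring = rte_ring_create(name, 256, rte_socket_id(), RING_F_SC_DEQ);
    } else {
        /* secondaries resolve the already-created ring by its shared name */
        ring = rte_ring_lookup(name);
    }
    return ring;
}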
+///////////////////////////////////////////// + + int32_t ethdev_init(struct protocol_stack *stack) { struct cfg_params *cfg = get_global_cfg_params(); @@ -454,7 +522,32 @@ int32_t ethdev_init(struct protocol_stack *stack) netif_set_link_up(&stack->netif); netif_set_up(&stack->netif); + +///////////////////////////////////////////////////// + /* set loopback netif */ + //g_config_params.host_addr.addr = inet_addr(host_addr); + //static ip_addr_t loopback_addr, loopback_netmask, loopback_gw; + static ip4_addr_t loopback_addr, loopback_netmask, loopback_gw; + loopback_addr.addr = inet_addr("127.0.0.1"); + loopback_netmask.addr = inet_addr("255.0.0.0"); + loopback_gw.addr = inet_addr("0.0.0.0"); + //IP4_ADDR(&loopback_addr, 127, 0, 0, 1); + //IP4_ADDR(&loopback_netmask, 255, 0, 0, 0); + //IP4_ADDR(&loopback_gw, 0, 0, 0, 0); + + if (/* netif_loopback_is_set */ true) { + netif = netif_add(&stack->netif_loopback, &loopback_addr, &loopback_netmask, + &loopback_gw, NULL, eth_loopback_dev_init, ethernet_input); + if (netif == NULL) { + LSTACK_LOG(ERR, LSTACK, "netif_loopback add failed\n"); + return ERR_IF; + } + + netif_set_link_up(&stack->netif_loopback); + netif_set_up(&stack->netif_loopback); + } +//////////////////////////////////////////// return 0; } @@ -726,7 +819,7 @@ void eth_user_init(void) stack_group = get_protocol_stack_group(); - g_eth_user = &g_eth_arr->eth[ETH_USER_IDX]; + g_eth_user = &g_eth_arr.dev[ETH_USER_IDX]; g_eth_user->linkinput = eth_user_linkinput; g_eth_user->input = eth_user_input; g_eth_user->linkoutput = eth_user_linkoutput; @@ -837,29 +930,153 @@ void eth_virtio_init(void) virtio_inst = virtio_instance_get(); - g_eth_virtio = &g_eth_arr->eth[ETH_VIRTIO_IDX]; + g_eth_virtio = &g_eth_arr.dev[ETH_VIRTIO_IDX]; g_eth_virtio->linkinput = eth_virtio_linkinput; - //g_eth_virtio->input = eth_virtio_input; + g_eth_virtio->linkoutput = eth_virtio_linkoutput; - //g_eth_virtio->output = eth_virtio_output; + g_eth_virtio->port_id = virtio_inst->virtio_port_id; } +/////////////////////////////////////////////loopback///////////////////////////// +static int loopback_ring_init(void) +{ + struct cfg_params *cfg_params = get_global_cfg_params(); + char ring_name[RTE_MEMZONE_NAMESIZE] = {0}; + struct rte_ring *loopback_ring; + int queue_id; + + LSTACK_LOG(INFO, LSTACK, + "config::num_cpu=%d num_process=%d \n", cfg_params->num_cpu, cfg_params->num_process); + + for (int cpu_idx = 0; cpu_idx < cfg_params->num_queue; cpu_idx++) { + for (int process_idx = 0; process_idx < cfg_params->num_process; process_idx++) { + queue_id = cpu_idx * cfg_params->num_process + process_idx; + if (queue_id >= PROTOCOL_STACK_MAX) { + LSTACK_LOG(ERR, LSTACK, "index is over\n"); + return -1; + } + snprintf_s(ring_name, sizeof(ring_name), sizeof(ring_name) - 1, "loopback_queue_%d", queue_id); + /* 主进程时,创建所有进程所需要的loopback_ring。 */ + if (get_global_cfg_params()->is_primary) { + g_loopback_ring[queue_id] = rte_ring_create(ring_name, LOOPBACK_RING_SIZE,rte_socket_id(), RING_F_SC_DEQ); + if (g_loopback_ring[queue_id] == NULL) { + LSTACK_LOG(ERR, LSTACK, "loopback queue create failed, queue_idx=%d\n", queue_id); + return -1; + } + } else { + /*非主进程时,通过查询队列名称,将共享内存ring 存储到g_loopback_ring中 */ + g_loopback_ring[queue_id] = rte_ring_lookup(ring_name); + if (g_loopback_ring[queue_id] == NULL) { + LSTACK_LOG(ERR, LSTACK, "loopback queue lookup failed, queue_idx=%d\n", queue_id); + return -1; + } + } + } + } + return 0; +} + +int eth_loopback_output(struct ethdev *dev, int queue_id, struct pbuf *p) +{ + uint32_t sent_pkts; 
+ struct rte_mbuf *mbuf_output; + + mbuf_output = generate_mbuf_from_pbuf(p); + + sent_pkts = dev->linkoutput(dev, dev->queue_idx, &mbuf_output, 1); + if (sent_pkts < 1) { + rte_pktmbuf_free(mbuf_output); + } + return sent_pkts; +} + +static int loopback_get_target_queue_id(int queue_id) +{ + if (queue_id == 0) { + return 1; + } + return 0; +} + +int eth_loopback_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num) +{ + int target_queue_id; + int pkts = 0; + int sent_pkts = 0; + + target_queue_id = loopback_get_target_queue_id(queue_id); + if (g_loopback_ring[target_queue_id] == NULL) { + LSTACK_LOG(ERR, LSTACK, "loopback queue has't been initialized, queue_idx=%d\n", queue_id); + return -1; + } + sent_pkts = rte_ring_enqueue_bulk(g_loopback_ring[target_queue_id], (void**)mbufs, num, NULL); + + return sent_pkts; +} + +int eth_loopback_input(struct ethdev *dev, struct pbuf *p) +{ + int ret = -1; +// struct netif* netif_input; + struct protocol_stack *stack; + + stack = get_protocol_stack(); +// netif_input = netif_get_by_index(dev->netif_idx); + if (p != NULL) { + ret = stack->netif_loopback.input(p, &stack->netif); + if (ret != ERR_OK) { + LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); + stack->stats.rx_drop++; + } + } + return ret; +} + +int eth_loopback_linkinput(struct ethdev *dev, int queue_id) +{ + int pkts; + struct pbuf *pbuf_input; + + struct rte_mbuf *mbufs_input[LOOPBACK_RING_SIZE]; + + if (g_loopback_ring[queue_id] == NULL) { + LSTACK_LOG(ERR, LSTACK, "loopback queue has't been initialized, queue_idx=%d\n", queue_id); + return -1; + } + + pkts = rte_ring_dequeue_burst(g_loopback_ring[queue_id], (void**)mbufs_input, LOOPBACK_RING_SIZE, NULL); + + if (pkts <= 0) { + return pkts; + } + + for (int i = 0; i < pkts; ++i) { + pbuf_input = generate_pbuf_from_mbuf(mbufs_input[i]); + dev->input(dev, pbuf_input); + } + + return pkts; +} +/////////////////////////////////////////////loopback end///////////////////////// ////////////////////////////////////////////////////////////////// void eths_init(void) { const struct rte_memzone *memzone; + + // static PER_THREAD struct ethdev_array g_eth_arr; + if (get_global_cfg_params()->is_primary) { memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); if (memzone == NULL) { LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); } - g_eth_arr = (struct ethdev_array*)(memzone->addr); - memset_s(g_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); + g_shared_eth_arr = (struct ethdev_array*)(memzone->addr); + memset_s(g_shared_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); } // else { // struct ethdev *eth_user; -- Gitee From 5f4562663d74ea89aa0c6d30eae318885c942eea Mon Sep 17 00:00:00 2001 From: yinbin Date: Thu, 27 Feb 2025 10:41:13 +0800 Subject: [PATCH 4/6] LOOPBACK: adjust loopback interface --- src/lstack/Makefile | 2 +- src/lstack/core/lstack_init.c | 2 + src/lstack/core/lstack_protocol_stack.c | 5 +- src/lstack/include/lstack_ethdev.h | 23 ++- src/lstack/include/lstack_protocol_stack.h | 3 - src/lstack/netif/lstack_ethdev.c | 213 +++++++++------------ 6 files changed, 109 insertions(+), 139 deletions(-) diff --git a/src/lstack/Makefile b/src/lstack/Makefile index ca1cf22..8a659c2 100644 --- a/src/lstack/Makefile +++ b/src/lstack/Makefile @@ -27,7 +27,7 @@ CC ?= gcc RM = rm -f ARCH := $(shell uname -m) -CFLAGS = -O2 -g 
-Wno-unused-variable +CFLAGS = -O2 -g LDFLAGS = -shared -ldl -lm -lpthread -lrt -lnuma -lconfig -lboundscheck ifeq ($(GAZELLE_COVERAGE_ENABLE), 1) diff --git a/src/lstack/core/lstack_init.c b/src/lstack/core/lstack_init.c index a5a0e47..282ffc9 100644 --- a/src/lstack/core/lstack_init.c +++ b/src/lstack/core/lstack_init.c @@ -319,7 +319,9 @@ __attribute__((constructor)) void gazelle_network_init(void) } } + /* 共享内存创建网卡,创建失败会退出程序 */ eths_init(); + if (!get_global_cfg_params()->stack_mode_rtc) { if (stack_setup_thread() != 0) { gazelle_exit(); diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c index 28a0f21..e0e18c7 100644 --- a/src/lstack/core/lstack_protocol_stack.c +++ b/src/lstack/core/lstack_protocol_stack.c @@ -536,7 +536,8 @@ int stack_polling(unsigned wakeup_tick) force_quit = rpc_poll_msg(&stack->rpc_queue, rpc_number); //eth_dev_poll(); - stack->eth_user->linkinput(stack->eth_user, stack->queue_id); + struct ethdev_array *g_eth_arr = get_g_eth_arr(); + g_eth_arr->dev[ETH_USER_IDX].linkinput(&g_eth_arr->dev[ETH_USER_IDX], stack->queue_id); timeout = sys_timer_run(); if (cfg->stack_interrupt) { @@ -585,7 +586,7 @@ int stack_polling(unsigned wakeup_tick) } #endif if (get_global_cfg_params()->flow_bifurcation) { - stack->eth_virtio->linkinput(stack->eth_virtio, stack->queue_id); + g_eth_arr->dev[ETH_VIRTIO_IDX].linkinput(&g_eth_arr->dev[ETH_VIRTIO_IDX], stack->queue_id); //virtio_tap_process_rx(stack->port_id, stack->queue_id); } return force_quit; diff --git a/src/lstack/include/lstack_ethdev.h b/src/lstack/include/lstack_ethdev.h index 185c8df..f0b0d73 100644 --- a/src/lstack/include/lstack_ethdev.h +++ b/src/lstack/include/lstack_ethdev.h @@ -16,6 +16,8 @@ #include #include +#include "common/gazelle_dfx_msg.h" + struct protocol_stack; struct rte_mbuf; struct lstack_dev_ops { @@ -43,15 +45,17 @@ enum { }; struct ethdev { - int port_id; - int queue_idx; - int netif_idx; + int port_id; + int queue_id; + //int netif_idx; // 使用netif_idx,在收发包时 会调用函数获取netif指针,会降低效率,这里直接使用netif指针,收发包时,通过dev直接获取netif*。 + struct netif *netif; + struct gazelle_stack_stat stats; - int (*linkinput)(struct ethdev *dev, int queue_id); - int (*input)(struct ethdev *dev, struct pbuf *p); + int (*linkinput)(struct ethdev *dev, int queue_id); + int (*input)(struct ethdev *dev, struct pbuf *p); - int (*output)(struct ethdev *dev, int queue_id, struct pbuf *p); - int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); + int (*output)(struct ethdev *dev, int queue_id, struct pbuf *p); + int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); }; struct ethdev_user { @@ -78,9 +82,10 @@ struct ethdev_array { // struct ethdev_virtio eth_virtio; // struct ethdev_loopback eth_loopback; //}; +struct ethdev_array *get_g_eth_arr(void); void eths_init(void); -void eth_virtio_init(void); -void eth_user_init(void); +//void eth_virtio_init(void); +//void eth_user_init(void); void virtio_tap_process_rx(uint16_t port, uint32_t queue_id); ///////////////////////// /////////////// //////////////// diff --git a/src/lstack/include/lstack_protocol_stack.h b/src/lstack/include/lstack_protocol_stack.h index 63a8f5b..4cb3174 100644 --- a/src/lstack/include/lstack_protocol_stack.h +++ b/src/lstack/include/lstack_protocol_stack.h @@ -70,9 +70,6 @@ struct protocol_stack { struct netif netif; struct netif netif_loopback; struct lstack_dev_ops dev_ops; - struct ethdev *eth_user; - struct ethdev *eth_virtio; - struct ethdev 
*eth_loopback; uint32_t rx_ring_used; uint32_t tx_ring_used; diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index 433bb47..9af6c1c 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ b/src/lstack/netif/lstack_ethdev.c @@ -59,12 +59,14 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); // ETH_COUNT, //}; //struct ethdev *g_eth_table[ETH_COUNT]; -struct ethdev_array *g_shared_eth_arr; // shared between multi-process. +//struct ethdev_array *g_shared_eth_arr; // shared between multi-process. static PER_THREAD struct ethdev_array g_eth_arr; //use in curr process; static struct rte_ring *g_loopback_ring[PROTOCOL_STACK_MAX]; //use in curr process; -static struct ethdev *g_eth_user; -static struct ethdev *g_eth_virtio; +struct ethdev_array *get_g_eth_arr(void) +{ + return &g_eth_arr; +} ///////////////////////////////////////////////////////////// @@ -253,6 +255,7 @@ static uint16_t eth_dev_get_dst_port(struct rte_mbuf *pkt) return dst_port; } +/////////////////////////////// 这个函数在lwip中还在使用,在某些函数中,会调用该函数收包,比如connect时 int32_t eth_dev_poll(void) { uint32_t nr_pkts; @@ -408,24 +411,23 @@ static err_t eth_dev_init(struct netif *netif) ////////////////////////////////////////////// -err_t -ethloopback_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +static err_t ethloopback_dev_output(struct netif *netif, struct pbuf *pbuf) { - return 0; -} + uint32_t sent_pkts; + struct ethdev *dev; -err_t -ethloopback_ip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ipaddr) -{ - return 0; -} -static err_t ethloopback_dev_output(struct netif *netif, struct pbuf *pbuf) -{ - return 0; + dev = &(g_eth_arr.dev[ETH_LOOPBACK_IDX]); + sent_pkts = dev->output(dev, dev->queue_id, pbuf); + dev->stats.tx += sent_pkts; + + if (sent_pkts < 1) { + dev->stats.tx_drop++; + return ERR_MEM; + } + return ERR_OK; } - static err_t eth_loopback_dev_init(struct netif *netif) { int ret; @@ -436,9 +438,9 @@ static err_t eth_loopback_dev_init(struct netif *netif) netif->name[1] = 'o'; netif->flags |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6; netif->mtu = FRAME_MTU; - netif->output = ethloopback_output; + netif->output = etharp_output; netif->linkoutput = ethloopback_dev_output; - netif->output_ip6 = ethloopback_ip6_output; + netif->output_ip6 = ethip6_output; ret = memcpy_s(netif->hwaddr, sizeof(netif->hwaddr), loopback_haddr, ETHER_ADDR_LEN); if (ret != EOK) { @@ -472,9 +474,9 @@ int32_t ethdev_init(struct protocol_stack *stack) { struct cfg_params *cfg = get_global_cfg_params(); int ret = 0; - - stack->eth_user = g_eth_user; - stack->eth_virtio = g_eth_virtio; + ////////////////////////////// 等待重构,需要对网卡array 重新赋值////////////// + // stack->eth_user = g_eth_user; + // stack->eth_virtio = g_eth_virtio; vdev_dev_ops_init(&stack->dev_ops); if (cfg->send_cache_mode) { ret = tx_cache_init(stack->queue_id, stack, &stack->dev_ops); @@ -524,18 +526,14 @@ int32_t ethdev_init(struct protocol_stack *stack) netif_set_up(&stack->netif); ///////////////////////////////////////////////////// - /* set loopback netif */ - //g_config_params.host_addr.addr = inet_addr(host_addr); - //static ip_addr_t loopback_addr, loopback_netmask, loopback_gw; - static ip4_addr_t loopback_addr, loopback_netmask, loopback_gw; - loopback_addr.addr = inet_addr("127.0.0.1"); - loopback_netmask.addr = inet_addr("255.0.0.0"); - loopback_gw.addr = inet_addr("0.0.0.0"); - //IP4_ADDR(&loopback_addr, 127, 0, 0, 1); - //IP4_ADDR(&loopback_netmask, 255, 0, 0, 
0); - //IP4_ADDR(&loopback_gw, 0, 0, 0, 0); - + if (/* netif_loopback_is_set */ true) { + /* set loopback netif */ + static ip4_addr_t loopback_addr, loopback_netmask, loopback_gw; + loopback_addr.addr = inet_addr("127.0.0.1"); + loopback_netmask.addr = inet_addr("255.0.0.0"); + loopback_gw.addr = inet_addr("0.0.0.0"); + netif = netif_add(&stack->netif_loopback, &loopback_addr, &loopback_netmask, &loopback_gw, NULL, eth_loopback_dev_init, ethernet_input); if (netif == NULL) { @@ -553,29 +551,6 @@ int32_t ethdev_init(struct protocol_stack *stack) //////////////////////////////////////// new eth //////////////////////////////// -//// 定义网卡结构 -//struct ethdev { -// int port_id; -// int queue_num; -// int netif_idx; -// -// int (*linkinput)(struct ethdev *dev, int queue_id); -// int (*input)(struct ethdev *dev, struct pbuf *p); -// -// int (*output)(struct ethdev *dev, queue_id, struct pbuf *p); -// int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], int num); -//}; -// -//int ethdev_create(void); -//void ethdev_destroy(int port_id); -//struct ethdev *ethdev_get(int port_id); - - -struct ethdev *get_g_eth_user() -{ - return g_eth_user; -} - enum PACKET_TRANSFER_TYPE get_packet_transfer_type(struct rte_mbuf *pkt, bool *is_arp) { @@ -649,16 +624,15 @@ struct pbuf *generate_pbuf_from_mbuf(struct rte_mbuf *mbuf) int eth_user_input(struct ethdev *dev, struct pbuf *pbuf_input) { int32_t ret = -1; - struct protocol_stack *stack; - - stack = get_protocol_stack(); + if (pbuf_input != NULL) { - ret = stack->netif.input(pbuf_input, &stack->netif); + ret = dev->netif->input(pbuf_input,dev->netif); if (ret != ERR_OK) { LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); - stack->stats.rx_drop++; + dev->stats.rx_drop++; } } + return ret; } @@ -700,7 +674,8 @@ int eth_user_linkinput(struct ethdev *dev, int queue_id) dev->input(dev, pbuf_input);//eth_user_input() } else if (transfer_type == TRANSFER_KERNEL) { if (get_global_cfg_params()->flow_bifurcation) { - g_eth_virtio->linkoutput(g_eth_virtio, stack->queue_id, &stack->pkts[i], 1); + g_eth_arr.dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr.dev[ETH_VIRTIO_IDX], + stack->queue_id, &stack->pkts[i], 1); } else { #if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) kni_handle_tx(stack->pkts[i]); @@ -779,13 +754,11 @@ int eth_user_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf **pkts int eth_user_output(struct ethdev *dev, int queue_id, struct pbuf *p) { uint32_t sent_pkts; - struct protocol_stack *stack; struct rte_mbuf *mbuf_output; - stack = get_protocol_stack(); mbuf_output = generate_mbuf_from_pbuf(p); - sent_pkts = stack->eth_user->linkoutput(stack->eth_user, stack->queue_id, &mbuf_output, 1); + sent_pkts = dev->linkoutput(dev, queue_id, &mbuf_output, 1); if (sent_pkts < 1) { rte_pktmbuf_free(mbuf_output); } @@ -794,39 +767,32 @@ int eth_user_output(struct ethdev *dev, int queue_id, struct pbuf *p) static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) { - struct ethdev *dev; - struct protocol_stack *stack; uint32_t sent_pkts; + struct ethdev *dev; + + + dev = &(g_eth_arr.dev[ETH_USER_IDX]); + sent_pkts = dev->output(dev, dev->queue_id, pbuf); + dev->stats.tx += sent_pkts; - stack = get_protocol_stack(); - dev = stack->eth_user; - sent_pkts = dev->output(dev, stack->queue_id, pbuf); - stack->stats.tx += sent_pkts; if (sent_pkts < 1) { - stack->stats.tx_drop++; + dev->stats.tx_drop++; return ERR_MEM; } return ERR_OK; } - - - - -void eth_user_init(void) +void eth_user_init(struct ethdev 
*dev) { struct protocol_stack_group *stack_group; stack_group = get_protocol_stack_group(); - g_eth_user = &g_eth_arr.dev[ETH_USER_IDX]; - g_eth_user->linkinput = eth_user_linkinput; - g_eth_user->input = eth_user_input; - g_eth_user->linkoutput = eth_user_linkoutput; - g_eth_user->output = eth_user_output; - - g_eth_user->port_id = stack_group->port_id; - + dev->port_id = stack_group->port_id; + dev->linkinput = eth_user_linkinput; + dev->input = eth_user_input; + dev->linkoutput = eth_user_linkoutput; + dev->output = eth_user_output; } @@ -909,7 +875,7 @@ int eth_virtio_linkinput(struct ethdev *dev, int queue_id) virtio_inst->rx_pkg[queue_id] += pkts; - g_eth_virtio->linkoutput(g_eth_user, queue_id, mbufs_input, pkts); + g_eth_arr.dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr.dev[ETH_VIRTIO_IDX], queue_id, mbufs_input, pkts); return pkts; } @@ -924,27 +890,23 @@ int eth_virtio_linkinput(struct ethdev *dev, int queue_id) // } //} -void eth_virtio_init(void) +void eth_virtio_init(struct ethdev *dev) { struct virtio_instance* virtio_inst; virtio_inst = virtio_instance_get(); - - g_eth_virtio = &g_eth_arr.dev[ETH_VIRTIO_IDX]; - - g_eth_virtio->linkinput = eth_virtio_linkinput; - - g_eth_virtio->linkoutput = eth_virtio_linkoutput; - - g_eth_virtio->port_id = virtio_inst->virtio_port_id; + dev->port_id = virtio_inst->virtio_port_id; + dev->linkinput = eth_virtio_linkinput; + dev->linkoutput = eth_virtio_linkoutput; } /////////////////////////////////////////////loopback///////////////////////////// +// loopback_ring 的初始化,需要在进程初始化时进行,所有进程都需要。 static int loopback_ring_init(void) { struct cfg_params *cfg_params = get_global_cfg_params(); char ring_name[RTE_MEMZONE_NAMESIZE] = {0}; - struct rte_ring *loopback_ring; +// struct rte_ring *loopback_ring; int queue_id; LSTACK_LOG(INFO, LSTACK, @@ -985,7 +947,7 @@ int eth_loopback_output(struct ethdev *dev, int queue_id, struct pbuf *p) mbuf_output = generate_mbuf_from_pbuf(p); - sent_pkts = dev->linkoutput(dev, dev->queue_idx, &mbuf_output, 1); + sent_pkts = dev->linkoutput(dev, dev->queue_id, &mbuf_output, 1); if (sent_pkts < 1) { rte_pktmbuf_free(mbuf_output); } @@ -1003,7 +965,6 @@ static int loopback_get_target_queue_id(int queue_id) int eth_loopback_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num) { int target_queue_id; - int pkts = 0; int sent_pkts = 0; target_queue_id = loopback_get_target_queue_id(queue_id); @@ -1019,16 +980,12 @@ int eth_loopback_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf *m int eth_loopback_input(struct ethdev *dev, struct pbuf *p) { int ret = -1; -// struct netif* netif_input; - struct protocol_stack *stack; - stack = get_protocol_stack(); -// netif_input = netif_get_by_index(dev->netif_idx); if (p != NULL) { - ret = stack->netif_loopback.input(p, &stack->netif); + ret = dev->netif->input(p, dev->netif); if (ret != ERR_OK) { LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); - stack->stats.rx_drop++; + dev->stats.rx_drop++; } } return ret; @@ -1059,35 +1016,43 @@ int eth_loopback_linkinput(struct ethdev *dev, int queue_id) return pkts; } + +void eth_loopback_init(struct ethdev *dev) +{ + dev->port_id = LOOPBACK_PORT_ID; + dev->linkinput = eth_loopback_linkinput; + dev->input = eth_loopback_input; + dev->linkoutput = eth_loopback_linkoutput; + dev->output = eth_loopback_output; +} /////////////////////////////////////////////loopback end///////////////////////// ////////////////////////////////////////////////////////////////// +// eths_init 
在所有进程中都需要执行,且必须在 stack_group 初始化完成后执行。 void eths_init(void) { const struct rte_memzone *memzone; - - // static PER_THREAD struct ethdev_array g_eth_arr; + struct ethdev_array *tmp_eth_arr; - - if (get_global_cfg_params()->is_primary) { - memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); - if (memzone == NULL) { - LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); - } - g_shared_eth_arr = (struct ethdev_array*)(memzone->addr); - memset_s(g_shared_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); + loopback_ring_init(); + + /* If the process is not the primary process, directly return after initializing loopback ring. */ + if (!(get_global_cfg_params()->is_primary)) { + return; } -// else { -// struct ethdev *eth_user; -// u64_t init_id = 0; -// memzone = rte_memzone_lookup(ETHDEV_MEMZONE_NAME); -// if (memzone == NULL) { -// LSTACK_EXIT(EXIT_FAILURE, "%s: Cannot get ethdev_table, have you started up the primary process?\n", __func__); -// } -// eth_user = (struct ethdev*)(memzone->addr); -// } - eth_user_init(); - eth_virtio_init(); + + memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), + rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); + if (memzone == NULL) { + LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); + } + + tmp_eth_arr = (struct ethdev_array*)(memzone->addr); + memset_s(tmp_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); + + eth_user_init(&tmp_eth_arr->dev[ETH_USER_IDX]); + eth_virtio_init(&tmp_eth_arr->dev[ETH_VIRTIO_IDX]); + eth_loopback_init(&tmp_eth_arr->dev[ETH_LOOPBACK_IDX]); } ////////////////////////////////////////////////////////////////// -- Gitee From 46bce4423fdd8b22b13ea22728d029d0c679f168 Mon Sep 17 00:00:00 2001 From: yinbin Date: Fri, 28 Feb 2025 11:25:16 +0800 Subject: [PATCH 5/6] loopback: loopback ready before gazelle multi-process --- src/lstack/api/lstack_epoll.c | 2 +- src/lstack/core/lstack_dpdk.c | 10 +- src/lstack/core/lstack_protocol_stack.c | 14 +-- src/lstack/include/lstack_ethdev.h | 31 +------ src/lstack/include/lstack_protocol_stack.h | 9 +- src/lstack/netif/lstack_ethdev.c | 102 ++++++++++----------- src/lstack/netif/lstack_tx_cache.c | 2 +- src/lstack/netif/lstack_vdev.c | 10 +- 8 files changed, 78 insertions(+), 102 deletions(-) diff --git a/src/lstack/api/lstack_epoll.c b/src/lstack/api/lstack_epoll.c index 644efc0..649cd3e 100644 --- a/src/lstack/api/lstack_epoll.c +++ b/src/lstack/api/lstack_epoll.c @@ -803,7 +803,7 @@ static void poll_bind_statck(struct wakeup_poll *wakeup, int32_t *stack_count) struct protocol_stack_group *stack_group = get_protocol_stack_group(); uint16_t bind_id = find_max_cnt_stack(stack_count, stack_group->stack_num, wakeup->bind_stack); - if (wakeup->bind_stack && wakeup->bind_stack->queue_id == bind_id) { + if (wakeup->bind_stack && wakeup->bind_stack->user_queue_id == bind_id) { return; } diff --git a/src/lstack/core/lstack_dpdk.c b/src/lstack/core/lstack_dpdk.c index 3023a6c..deb12f7 100644 --- a/src/lstack/core/lstack_dpdk.c +++ b/src/lstack/core/lstack_dpdk.c @@ -205,14 +205,14 @@ static struct reg_ring_msg *create_reg_mempool(const char *name, uint16_t queue_ int32_t pktmbuf_pool_init(struct protocol_stack *stack) { - stack->rxtx_mbuf_pool = get_pktmbuf_mempool("rxtx_mbuf", stack->queue_id); + stack->rxtx_mbuf_pool = get_pktmbuf_mempool("rxtx_mbuf", stack->user_queue_id); if 
(stack->rxtx_mbuf_pool == NULL) { LSTACK_LOG(ERR, LSTACK, "rxtx_mbuf_pool is NULL\n"); return -1; } if (use_ltran()) { - stack->reg_buf = create_reg_mempool("reg_ring_msg", stack->queue_id); + stack->reg_buf = create_reg_mempool("reg_ring_msg", stack->user_queue_id); if (stack->reg_buf == NULL) { LSTACK_LOG(ERR, LSTACK, "rxtx_mbuf_pool is NULL\n"); return -1; @@ -245,8 +245,8 @@ struct rte_mempool *create_mempool(const char *name, uint32_t count, uint32_t si int32_t create_shared_ring(struct protocol_stack *stack) { - rpc_queue_init(&stack->rpc_queue, stack->queue_id); - rpc_queue_init(&stack->dfx_rpc_queue, stack->queue_id); + rpc_queue_init(&stack->rpc_queue, stack->user_queue_id); + rpc_queue_init(&stack->dfx_rpc_queue, stack->user_queue_id); if (use_ltran()) { stack->rx_ring = gazelle_ring_create_fast("RING_RX", VDEV_RX_QUEUE_SZ, RING_F_SP_ENQ | RING_F_SC_DEQ); @@ -883,7 +883,7 @@ bool port_in_stack_queue(gz_addr_t *src_ip, gz_addr_t *dst_ip, uint16_t src_port uint32_t reta_index = hash & stack_group->eth_params->reta_mask; struct protocol_stack *stack = get_protocol_stack(); - return (reta_index % stack_group->eth_params->nb_queues) == stack->queue_id; + return (reta_index % stack_group->eth_params->nb_queues) == stack->user_queue_id; } static int dpdk_nic_xstats_value_get(uint64_t *values, unsigned int len, uint16_t *ports, unsigned int count) diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c index e0e18c7..775ecc6 100644 --- a/src/lstack/core/lstack_protocol_stack.c +++ b/src/lstack/core/lstack_protocol_stack.c @@ -166,7 +166,7 @@ void bind_to_stack_numa(struct protocol_stack *stack) ret = pthread_setaffinity_np(tid, sizeof(stack->idle_cpuset), &stack->idle_cpuset); if (ret != 0) { - LSTACK_LOG(ERR, LSTACK, "thread %d setaffinity to stack %hu failed\n", rte_gettid(), stack->queue_id); + LSTACK_LOG(ERR, LSTACK, "thread %d setaffinity to stack %hu failed\n", rte_gettid(), stack->user_queue_id); return; } } @@ -397,7 +397,7 @@ static int32_t init_stack_value(struct protocol_stack *stack, void *arg) struct cfg_params *cfg_params = get_global_cfg_params(); stack->tid = rte_gettid(); - stack->queue_id = t_params->queue_id; + stack->user_queue_id = t_params->queue_id; stack->port_id = stack_group->port_id; stack->stack_idx = t_params->idx; stack->lwip_stats = &lwip_stats; @@ -537,7 +537,7 @@ int stack_polling(unsigned wakeup_tick) //eth_dev_poll(); struct ethdev_array *g_eth_arr = get_g_eth_arr(); - g_eth_arr->dev[ETH_USER_IDX].linkinput(&g_eth_arr->dev[ETH_USER_IDX], stack->queue_id); + g_eth_arr->dev[ETH_USER_IDX].linkinput(&g_eth_arr->dev[ETH_USER_IDX], stack->user_queue_id); timeout = sys_timer_run(); if (cfg->stack_interrupt) { @@ -557,7 +557,7 @@ int stack_polling(unsigned wakeup_tick) if ((wakeup_tick & 0xf) == 0) { wakeup_stack_epoll(stack); if (get_global_cfg_params()->send_cache_mode) { - tx_cache_send(stack->queue_id); + tx_cache_send(stack->user_queue_id); } } @@ -578,7 +578,7 @@ int stack_polling(unsigned wakeup_tick) /* run to completion mode currently does not support kni */ /* KNI requests are generally low-rate I/Os, * so processing KNI requests only in the thread with queue_id No.0 is sufficient. 
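
Note on the polling change above: stack_polling() no longer calls eth_dev_poll() directly but dispatches through the shared ethdev table, so one loop can drive the user NIC, the virtio path and the loopback device via the same linkinput callback. The following is a minimal sketch of that dispatch, assuming the ETH_*_IDX indices, get_g_eth_arr() and struct protocol_stack from this series; it is illustrative only and collapses the conditions the patch keeps (virtio is polled only under flow_bifurcation, and loopback uses its own queue id), with error handling elided.

#include "lstack_ethdev.h"
#include "lstack_protocol_stack.h"

/* Sketch: drive every registered ethdev from one stack thread. */
static int stack_poll_all_devs(struct protocol_stack *stack)
{
    struct ethdev_array *arr = get_g_eth_arr();
    int pkts = 0;

    for (int i = 0; i < ETH_COUNT; i++) {
        struct ethdev *dev = &arr->dev[i];
        if (dev->linkinput == NULL) {
            continue;   /* role not initialized in this process */
        }
        /* for illustration all devices are polled with the user queue id;
         * the patch itself keeps a separate loopback_queue_id per stack */
        pkts += dev->linkinput(dev, stack->user_queue_id);
    }
    return pkts;
}
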
*/ - if (kni_switch && !stack->queue_id && !(wakeup_tick & 0xfff)) { + if (kni_switch && !stack->user_queue_id && !(wakeup_tick & 0xfff)) { rte_kni_handle_request(get_gazelle_kni()); if (get_kni_started()) { kni_handle_rx(stack->port_id); @@ -586,7 +586,7 @@ int stack_polling(unsigned wakeup_tick) } #endif if (get_global_cfg_params()->flow_bifurcation) { - g_eth_arr->dev[ETH_VIRTIO_IDX].linkinput(&g_eth_arr->dev[ETH_VIRTIO_IDX], stack->queue_id); + g_eth_arr->dev[ETH_VIRTIO_IDX].linkinput(&g_eth_arr->dev[ETH_VIRTIO_IDX], stack->user_queue_id); //virtio_tap_process_rx(stack->port_id, stack->queue_id); } return force_quit; @@ -599,7 +599,7 @@ static bool stack_local_event_get(uint16_t stack_id) !lockless_queue_empty(&stack->rpc_queue.queue) || !list_head_empty(&stack->recv_list) || !list_head_empty(&stack->wakeup_list) || - tx_cache_count(stack->queue_id)) { + tx_cache_count(stack->user_queue_id)) { return true; } return false; diff --git a/src/lstack/include/lstack_ethdev.h b/src/lstack/include/lstack_ethdev.h index f0b0d73..5714a96 100644 --- a/src/lstack/include/lstack_ethdev.h +++ b/src/lstack/include/lstack_ethdev.h @@ -44,12 +44,13 @@ enum { ETH_COUNT, }; +#define ETH_QUEUE_MAX 32 // equal to PROTOCOL_STACK_MAX struct ethdev { int port_id; - int queue_id; - //int netif_idx; // 使用netif_idx,在收发包时 会调用函数获取netif指针,会降低效率,这里直接使用netif指针,收发包时,通过dev直接获取netif*。 - struct netif *netif; - struct gazelle_stack_stat stats; + int queue_num; + int netif_id; // 使用netif_idx,在收发包时 会调用函数获取netif指针,会降低效率,这里直接使用netif指针,收发包时,通过dev直接获取netif*。 + //struct netif *netif; + struct gazelle_stack_stat stats[ETH_QUEUE_MAX]; int (*linkinput)(struct ethdev *dev, int queue_id); int (*input)(struct ethdev *dev, struct pbuf *p); @@ -58,34 +59,12 @@ struct ethdev { int (*linkoutput)(struct ethdev *dev, int queue_id, struct rte_mbuf *mbufs[], unsigned int num); }; -struct ethdev_user { - struct ethdev dev; - int queue_cnt_max; - int queue_idx_next; // means new thread get the queue index; -}; - -struct ethdev_virtio { - struct ethdev dev; -}; - -struct ethdev_loopback { - struct ethdev dev; - int queue_idx_next; // means new thread get the queue index; -}; - struct ethdev_array { struct ethdev dev[ETH_COUNT]; }; -//struct ethdev_array { -// struct ethdev_user eth_user; -// struct ethdev_virtio eth_virtio; -// struct ethdev_loopback eth_loopback; -//}; struct ethdev_array *get_g_eth_arr(void); void eths_init(void); -//void eth_virtio_init(void); -//void eth_user_init(void); void virtio_tap_process_rx(uint16_t port, uint32_t queue_id); ///////////////////////// /////////////// //////////////// diff --git a/src/lstack/include/lstack_protocol_stack.h b/src/lstack/include/lstack_protocol_stack.h index 4cb3174..2a49541 100644 --- a/src/lstack/include/lstack_protocol_stack.h +++ b/src/lstack/include/lstack_protocol_stack.h @@ -38,8 +38,6 @@ struct protocol_stack { uint32_t tid; - uint16_t queue_id; - uint16_t port_id; uint16_t numa_id; uint16_t cpu_id; uint32_t stack_idx; @@ -67,7 +65,12 @@ struct protocol_stack { int32_t kernel_event_num; char pad3 __rte_cache_aligned; - struct netif netif; + uint16_t port_id; // dpdk port_id + uint16_t user_queue_id; + uint16_t virtio_queue_id; + uint16_t loopback_queue_id; + + struct netif netif; // user_netif struct netif netif_loopback; struct lstack_dev_ops dev_ops; uint32_t rx_ring_used; diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index 9af6c1c..9d14385 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ b/src/lstack/netif/lstack_ethdev.c @@ -43,7 +43,6 @@ 
#define MBUF_MAX_LEN 1514 #define PACKET_READ_SIZE 32 -//////////////////////////////////////loopback/////////////// static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); #define ETHDEV_MEMZONE_NAME "gazelle_shared_ethdevs_mem" @@ -52,26 +51,15 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf); //#define RTE_MAX_ETHPORTS 32 #define LOOPBACK_PORT_ID 33 // "#define RTE_MAX_ETHPORTS 32", Loopback will not occupy dpdk port. #define LOOPBACK_RING_SIZE 256 -//enum { -// ETH_USER_IDX = 0, -// ETH_VIRTIO_IDX, -// ETH_LOOPBACK_IDX, -// ETH_COUNT, -//}; -//struct ethdev *g_eth_table[ETH_COUNT]; -//struct ethdev_array *g_shared_eth_arr; // shared between multi-process. -static PER_THREAD struct ethdev_array g_eth_arr; //use in curr process; + +static struct ethdev_array *g_eth_arr; //use in curr process; static struct rte_ring *g_loopback_ring[PROTOCOL_STACK_MAX]; //use in curr process; struct ethdev_array *get_g_eth_arr(void) { - return &g_eth_arr; + return g_eth_arr; } -///////////////////////////////////////////////////////////// - - - /* any protocol stack thread receives arp packet and sync it to other threads, * so that it can have the arp table */ static void stack_broadcast_arp(struct rte_mbuf *mbuf, struct protocol_stack *cur_stack) @@ -123,7 +111,7 @@ static void stack_broadcast_arp(struct rte_mbuf *mbuf, struct protocol_stack *cu return; } copy_mbuf(mbuf_copy, mbuf); - virtio_tap_process_tx(cur_stack->queue_id, mbuf_copy); + virtio_tap_process_tx(cur_stack->user_queue_id, mbuf_copy); } return; } @@ -283,7 +271,7 @@ int32_t eth_dev_poll(void) /* copy arp into other process */ transfer_arp_to_other_process(stack->pkts[i]); } else { - if (get_global_cfg_params()->tuple_filter && stack->queue_id == 0) { + if (get_global_cfg_params()->tuple_filter && stack->user_queue_id == 0) { transfer_type = distribute_pakages(stack->pkts[i]); } if (get_global_cfg_params()->flow_bifurcation) { @@ -299,7 +287,7 @@ int32_t eth_dev_poll(void) eth_dev_recv(stack->pkts[i], stack); } else if (transfer_type == TRANSFER_KERNEL) { if (get_global_cfg_params()->flow_bifurcation) { - virtio_tap_process_tx(stack->queue_id, stack->pkts[i]); + virtio_tap_process_tx(stack->user_queue_id, stack->pkts[i]); } else { #if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) kni_handle_tx(stack->pkts[i]); @@ -415,14 +403,14 @@ static err_t ethloopback_dev_output(struct netif *netif, struct pbuf *pbuf) { uint32_t sent_pkts; struct ethdev *dev; + uint16_t queue_id = get_protocol_stack()->user_queue_id; - - dev = &(g_eth_arr.dev[ETH_LOOPBACK_IDX]); - sent_pkts = dev->output(dev, dev->queue_id, pbuf); - dev->stats.tx += sent_pkts; + dev = &g_eth_arr->dev[ETH_LOOPBACK_IDX]; + sent_pkts = dev->output(dev, queue_id, pbuf); + dev->stats[queue_id].tx += sent_pkts; if (sent_pkts < 1) { - dev->stats.tx_drop++; + dev->stats[queue_id].tx_drop++; return ERR_MEM; } return ERR_OK; @@ -479,7 +467,7 @@ int32_t ethdev_init(struct protocol_stack *stack) // stack->eth_virtio = g_eth_virtio; vdev_dev_ops_init(&stack->dev_ops); if (cfg->send_cache_mode) { - ret = tx_cache_init(stack->queue_id, stack, &stack->dev_ops); + ret = tx_cache_init(stack->user_queue_id, stack, &stack->dev_ops); if (ret < 0) { return ret; } @@ -493,7 +481,7 @@ int32_t ethdev_init(struct protocol_stack *stack) return ret; } } else { - if (cfg->tuple_filter && stack->queue_id == 0) { + if (cfg->tuple_filter && stack->user_queue_id == 0) { flow_init(); } } @@ -566,7 +554,7 @@ enum PACKET_TRANSFER_TYPE get_packet_transfer_type(struct rte_mbuf *pkt, bool *i 
return transfer_type; } - if (get_global_cfg_params()->tuple_filter && get_protocol_stack()->queue_id == 0) { + if (get_global_cfg_params()->tuple_filter && get_protocol_stack()->user_queue_id == 0) { transfer_type = distribute_pakages(pkt); } @@ -624,12 +612,13 @@ struct pbuf *generate_pbuf_from_mbuf(struct rte_mbuf *mbuf) int eth_user_input(struct ethdev *dev, struct pbuf *pbuf_input) { int32_t ret = -1; + struct protocol_stack *stack = get_protocol_stack(); if (pbuf_input != NULL) { - ret = dev->netif->input(pbuf_input,dev->netif); + ret = stack->netif.input(pbuf_input,&stack->netif); if (ret != ERR_OK) { LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); - dev->stats.rx_drop++; + dev->stats[stack->user_queue_id].rx_drop++; } } @@ -674,8 +663,8 @@ int eth_user_linkinput(struct ethdev *dev, int queue_id) dev->input(dev, pbuf_input);//eth_user_input() } else if (transfer_type == TRANSFER_KERNEL) { if (get_global_cfg_params()->flow_bifurcation) { - g_eth_arr.dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr.dev[ETH_VIRTIO_IDX], - stack->queue_id, &stack->pkts[i], 1); + g_eth_arr->dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr->dev[ETH_VIRTIO_IDX], + stack->user_queue_id, &stack->pkts[i], 1); } else { #if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) kni_handle_tx(stack->pkts[i]); @@ -769,14 +758,14 @@ static err_t eth_dev_output(struct netif *netif, struct pbuf *pbuf) { uint32_t sent_pkts; struct ethdev *dev; + uint16_t queue_id = get_protocol_stack()->user_queue_id; - - dev = &(g_eth_arr.dev[ETH_USER_IDX]); - sent_pkts = dev->output(dev, dev->queue_id, pbuf); - dev->stats.tx += sent_pkts; + dev = &g_eth_arr->dev[ETH_USER_IDX]; + sent_pkts = dev->output(dev, queue_id, pbuf); + dev->stats[queue_id].tx += sent_pkts; if (sent_pkts < 1) { - dev->stats.tx_drop++; + dev->stats[queue_id].tx_drop++; return ERR_MEM; } return ERR_OK; @@ -875,7 +864,7 @@ int eth_virtio_linkinput(struct ethdev *dev, int queue_id) virtio_inst->rx_pkg[queue_id] += pkts; - g_eth_arr.dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr.dev[ETH_VIRTIO_IDX], queue_id, mbufs_input, pkts); + g_eth_arr->dev[ETH_VIRTIO_IDX].linkoutput(&g_eth_arr->dev[ETH_VIRTIO_IDX], queue_id, mbufs_input, pkts); return pkts; } @@ -947,7 +936,7 @@ int eth_loopback_output(struct ethdev *dev, int queue_id, struct pbuf *p) mbuf_output = generate_mbuf_from_pbuf(p); - sent_pkts = dev->linkoutput(dev, dev->queue_id, &mbuf_output, 1); + sent_pkts = dev->linkoutput(dev, queue_id, &mbuf_output, 1); if (sent_pkts < 1) { rte_pktmbuf_free(mbuf_output); } @@ -980,12 +969,14 @@ int eth_loopback_linkoutput(struct ethdev *dev, int queue_id, struct rte_mbuf *m int eth_loopback_input(struct ethdev *dev, struct pbuf *p) { int ret = -1; - + struct protocol_stack *stack = get_protocol_stack(); + uint16_t queue_id = stack->loopback_queue_id; + if (p != NULL) { - ret = dev->netif->input(p, dev->netif); + ret = stack->netif_loopback.input(p, &stack->netif_loopback); if (ret != ERR_OK) { LSTACK_LOG(ERR, LSTACK, "eth_dev_recv: failed to handle rx pbuf ret=%d\n", ret); - dev->stats.rx_drop++; + dev->stats[queue_id].rx_drop++; } } return ret; @@ -995,7 +986,6 @@ int eth_loopback_linkinput(struct ethdev *dev, int queue_id) { int pkts; struct pbuf *pbuf_input; - struct rte_mbuf *mbufs_input[LOOPBACK_RING_SIZE]; if (g_loopback_ring[queue_id] == NULL) { @@ -1033,26 +1023,30 @@ void eth_loopback_init(struct ethdev *dev) void eths_init(void) { const struct rte_memzone *memzone; - struct ethdev_array *tmp_eth_arr; loopback_ring_init(); - /* If the process is not the primary 
process, directly return after initializing loopback ring. */ - if (!(get_global_cfg_params()->is_primary)) { + if (get_global_cfg_params()->is_primary) { + memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), + rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); + if (memzone == NULL) { + LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); + } + + g_eth_arr = (struct ethdev_array*)(memzone->addr); + memset_s(g_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); + + eth_user_init(&g_eth_arr->dev[ETH_USER_IDX]); + eth_virtio_init(&g_eth_arr->dev[ETH_VIRTIO_IDX]); + eth_loopback_init(&g_eth_arr->dev[ETH_LOOPBACK_IDX]); + return; } - memzone = rte_memzone_reserve(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), - rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY); + memzone = rte_memzone_lookup(ETHDEV_MEMZONE_NAME); if (memzone == NULL) { - LSTACK_EXIT(EXIT_FAILURE, "%s: rte_memzone_reserve for ethdev_table failed.\n", __func__); + LSTACK_EXIT(EXIT_FAILURE, "%s: Cannot get ethdev_table, have you started up the primary process?\n", __func__); } - - tmp_eth_arr = (struct ethdev_array*)(memzone->addr); - memset_s(tmp_eth_arr, sizeof(struct ethdev_array), 0, sizeof(struct ethdev_array)); - - eth_user_init(&tmp_eth_arr->dev[ETH_USER_IDX]); - eth_virtio_init(&tmp_eth_arr->dev[ETH_VIRTIO_IDX]); - eth_loopback_init(&tmp_eth_arr->dev[ETH_LOOPBACK_IDX]); + g_eth_arr = (struct ethdev_array*)(memzone->addr); } ////////////////////////////////////////////////////////////////// diff --git a/src/lstack/netif/lstack_tx_cache.c b/src/lstack/netif/lstack_tx_cache.c index 17d3c3b..0dd24b9 100644 --- a/src/lstack/netif/lstack_tx_cache.c +++ b/src/lstack/netif/lstack_tx_cache.c @@ -113,7 +113,7 @@ static uint32_t tx_cache_recv(struct protocol_stack *stack, struct rte_mbuf **pk LSTACK_LOG(ERR, LSTACK, "arg not support, nr_pkts is %d\n", nr_pkts); return 0; } - uint16_t queue_id = stack->queue_id; + uint16_t queue_id = stack->user_queue_id; struct tx_cache *tx_cache = g_tx_cache[queue_id]; if (tx_cache == NULL) { LSTACK_LOG(ERR, LSTACK, "queue(%d) tx cache get failed\n", queue_id); diff --git a/src/lstack/netif/lstack_vdev.c b/src/lstack/netif/lstack_vdev.c index b1d1a1b..6e9c399 100644 --- a/src/lstack/netif/lstack_vdev.c +++ b/src/lstack/netif/lstack_vdev.c @@ -140,7 +140,7 @@ static uint32_t vdev_rx_poll(struct protocol_stack *stack, struct rte_mbuf **pkt .max_item_per_flow = 16, }; - uint32_t pkt_num = rte_eth_rx_burst(stack->port_id, stack->queue_id, pkts, max_mbuf); + uint32_t pkt_num = rte_eth_rx_burst(stack->port_id, stack->user_queue_id, pkts, max_mbuf); vdev_pkts_parse(pkts, pkt_num); if (pkt_num <= 1) { return pkt_num; @@ -184,14 +184,14 @@ uint32_t vdev_tx_xmit(struct protocol_stack *stack, struct rte_mbuf **pkts, uint { uint32_t sent_pkts = 0; - if (rte_eth_tx_prepare(stack->port_id, stack->queue_id, pkts, nr_pkts) != nr_pkts) { + if (rte_eth_tx_prepare(stack->port_id, stack->user_queue_id, pkts, nr_pkts) != nr_pkts) { stack->stats.tx_prepare_fail++; LSTACK_LOG(INFO, LSTACK, "rte_eth_tx_prepare failed\n"); } const uint32_t tbegin = sys_now(); do { - sent_pkts += rte_eth_tx_burst(stack->port_id, stack->queue_id, &pkts[sent_pkts], nr_pkts - sent_pkts); + sent_pkts += rte_eth_tx_burst(stack->port_id, stack->user_queue_id, &pkts[sent_pkts], nr_pkts - sent_pkts); } while (sent_pkts < nr_pkts && (ENQUEUE_RING_RETRY_TIMEOUT > sys_now() - tbegin)); return sent_pkts; @@ -229,7 +229,7 @@ int32_t vdev_reg_xmit(enum reg_ring_type type, 
struct gazelle_quintuple *qtuple) if (type == REG_RING_TCP_CONNECT_CLOSE) { if (get_global_cfg_params()->is_primary) { delete_user_process_port(qtuple->src_port, PORT_CONNECT); - uint16_t queue_id = get_protocol_stack()->queue_id; + uint16_t queue_id = get_protocol_stack()->user_queue_id; if (queue_id != 0) { transfer_delete_rule_info_to_process0(qtuple->dst_ip.u_addr.ip4.addr, qtuple->src_port, qtuple->dst_port); @@ -241,7 +241,7 @@ int32_t vdev_reg_xmit(enum reg_ring_type type, struct gazelle_quintuple *qtuple) } if (type == REG_RING_TCP_CONNECT) { - uint16_t queue_id = get_protocol_stack()->queue_id; + uint16_t queue_id = get_protocol_stack()->user_queue_id; if (get_global_cfg_params()->is_primary) { add_user_process_port(qtuple->src_port, get_global_cfg_params()->process_idx, PORT_CONNECT); if (queue_id != 0) { -- Gitee From 185c4ea0f715d93900381c75ee9b41f0f99199d8 Mon Sep 17 00:00:00 2001 From: yinbin Date: Tue, 4 Mar 2025 17:26:23 +0800 Subject: [PATCH 6/6] loopback: gazelle multi process enable --- src/lstack/core/lstack_dpdk.c | 21 +++++++++++++-------- src/lstack/core/lstack_init.c | 6 ------ src/lstack/core/lstack_protocol_stack.c | 8 +++----- src/lstack/netif/lstack_ethdev.c | 2 +- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/lstack/core/lstack_dpdk.c b/src/lstack/core/lstack_dpdk.c index deb12f7..824c670 100644 --- a/src/lstack/core/lstack_dpdk.c +++ b/src/lstack/core/lstack_dpdk.c @@ -156,9 +156,16 @@ struct rte_mempool *create_pktmbuf_mempool(const char *name, uint32_t nb_mbuf, /* time stamp before pbuf_custom as priv_data */ uint16_t private_size = RTE_ALIGN(sizeof(struct mbuf_private), RTE_CACHE_LINE_SIZE); - pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf, mbuf_cache_size, private_size, MBUF_SZ, numa_id); - if (pool == NULL) { - LSTACK_LOG(ERR, LSTACK, "cannot create %s pool rte_err=%d\n", pool_name, rte_errno); + if (get_global_cfg_params()->is_primary) { + pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf, mbuf_cache_size, private_size, MBUF_SZ, numa_id); + if (pool == NULL) { + LSTACK_LOG(ERR, LSTACK, "cannot create %s pool rte_err=%d\n", pool_name, rte_errno); + } + } else { + pool = rte_mempool_lookup(pool_name); + if (pool == NULL) { + LSTACK_LOG(ERR, LSTACK, "lookup %s pool failed, rte_err=%d\n", pool_name, rte_errno); + } } return pool; @@ -603,11 +610,9 @@ static int32_t dpdk_ethdev_setup(const struct eth_params *eth_params, uint16_t i struct cfg_params *cfg = get_global_cfg_params(); struct rte_mempool *rxtx_mbuf_pool = get_protocol_stack_group()->total_rxtx_pktmbuf_pool[idx]; - if (!cfg->use_ltran && cfg->num_process == 1) { - numa_id = (cfg->stack_num > 0) ? cfg->numa_id : numa_node_of_cpu(cfg->cpus[idx]); - } else { - numa_id = cfg->process_numa[idx]; - } + + numa_id = (cfg->stack_num > 0) ? 
cfg->numa_id : numa_node_of_cpu(cfg->cpus[idx]); + ret = rte_eth_rx_queue_setup(eth_params->port_id, idx, eth_params->nb_rx_desc, numa_id, ð_params->rx_conf, rxtx_mbuf_pool); if (ret < 0) { diff --git a/src/lstack/core/lstack_init.c b/src/lstack/core/lstack_init.c index 282ffc9..91efe72 100644 --- a/src/lstack/core/lstack_init.c +++ b/src/lstack/core/lstack_init.c @@ -269,12 +269,6 @@ __attribute__((constructor)) void gazelle_network_init(void) return; } - /* check lstack num */ - if (check_params_from_primary() < 0) { - LSTACK_PRE_LOG(LSTACK_ERR, "lstack num error, not same to primary process!\n"); - LSTACK_EXIT(1, "lstack num error, not same to primary process!\n"); - } - /* save initial affinity */ if (!get_global_cfg_params()->main_thread_affinity) { if (thread_affinity_default() < 0) { diff --git a/src/lstack/core/lstack_protocol_stack.c b/src/lstack/core/lstack_protocol_stack.c index 775ecc6..22713bb 100644 --- a/src/lstack/core/lstack_protocol_stack.c +++ b/src/lstack/core/lstack_protocol_stack.c @@ -694,11 +694,9 @@ int stack_group_init(void) stack_group->stack_setup_fail = 0; - if (get_global_cfg_params()->is_primary) { - if (stack_group_init_mempool() != 0) { - LSTACK_LOG(ERR, LSTACK, "stack group init mempool failed\n"); - return -1; - } + if (stack_group_init_mempool() != 0) { + LSTACK_LOG(ERR, LSTACK, "stack group init mempool failed\n"); + return -1; } return 0; diff --git a/src/lstack/netif/lstack_ethdev.c b/src/lstack/netif/lstack_ethdev.c index 9d14385..8a63200 100644 --- a/src/lstack/netif/lstack_ethdev.c +++ b/src/lstack/netif/lstack_ethdev.c @@ -656,7 +656,7 @@ int eth_user_linkinput(struct ethdev *dev, int queue_id) if (is_arp) { stack_broadcast_arp(stack->pkts[i], stack); /* copy arp into other process */ - transfer_arp_to_other_process(stack->pkts[i]); + //transfer_arp_to_other_process(stack->pkts[i]); } pbuf_input = generate_pbuf_from_mbuf(stack->pkts[i]); -- Gitee
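
Taken together, the last two patches make the shared DPDK objects follow the usual multi-process rule: the primary process creates them, secondaries only look them up by name in shared hugepage memory. That is why check_params_from_primary() and the is_primary guard around stack_group_init_mempool() can be dropped. A condensed sketch of the pattern for the pktmbuf pool and the ethdev memzone is below, assuming the naming used in this series; the data room size is shown with the DPDK default (the patch uses its own MBUF_SZ) and error handling is reduced to returning NULL.

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

/* Sketch: create-in-primary / lookup-in-secondary for named DPDK objects. */
static struct rte_mempool *get_shared_pktmbuf_pool(const char *name, bool is_primary,
                                                   unsigned int nb_mbuf, unsigned int cache,
                                                   uint16_t priv_size, int socket_id)
{
    if (is_primary) {
        return rte_pktmbuf_pool_create(name, nb_mbuf, cache, priv_size,
                                       RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
    }
    /* secondary: the pool already exists in shared memory, just resolve it */
    return rte_mempool_lookup(name);
}

static void *get_shared_memzone(const char *name, size_t len, bool is_primary)
{
    const struct rte_memzone *mz;

    mz = is_primary ? rte_memzone_reserve(name, len, rte_socket_id(), RTE_MEMZONE_SIZE_HINT_ONLY)
                    : rte_memzone_lookup(name);
    return (mz != NULL) ? mz->addr : NULL;
}

In this sketch the primary would call get_shared_memzone(ETHDEV_MEMZONE_NAME, sizeof(struct ethdev_array), true) and zero the result before initializing the per-device callbacks, while a secondary calls it with is_primary == false and simply attaches, matching the eths_init() flow in the patch.
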