diff --git a/Migration-support-devices-parallel-feature.patch b/Migration-support-devices-parallel-feature.patch new file mode 100644 index 0000000000000000000000000000000000000000..dc80050c162bf8699615c042dd2ea69cb615fb6e --- /dev/null +++ b/Migration-support-devices-parallel-feature.patch @@ -0,0 +1,94 @@ +From 3574a2344dc47311ba62a3a0aff4f2fe33d8767c Mon Sep 17 00:00:00 2001 +From: GQX <2290721782@qq.com> +Date: Mon, 24 Nov 2025 14:55:06 +0800 +Subject: [PATCH 5/5] Migration: support devices-parallel feature + +Currently, just reserve the qmp interface here. + +Signed-off-by: GQX <2290721782@qq.com> +--- + migration/options.c | 9 +++++++++ + migration/options.h | 1 + + qapi/migration.json | 6 +++++- + 3 files changed, 15 insertions(+), 1 deletion(-) + +diff --git a/migration/options.c b/migration/options.c +index a1c29293d3..a234ba72b7 100644 +--- a/migration/options.c ++++ b/migration/options.c +@@ -221,6 +221,7 @@ Property migration_properties[] = { + DEFINE_PROP_MIG_CAP("x-background-snapshot", + MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), + DEFINE_PROP_MIG_CAP("x-onecopy", MIGRATION_CAPABILITY_ONECOPY), ++ DEFINE_PROP_MIG_CAP("x-devices-parallel", MIGRATION_CAPABILITY_DEVICES_PARALLEL), + #ifdef CONFIG_LINUX + DEFINE_PROP_MIG_CAP("x-zero-copy-send", + MIGRATION_CAPABILITY_ZERO_COPY_SEND), +@@ -346,6 +347,13 @@ bool migrate_urma(void) + } + #endif + ++bool migrate_devices_parallel(void) ++{ ++ MigrationState *s = migrate_get_current(); ++ ++ return s->capabilities[MIGRATION_CAPABILITY_DEVICES_PARALLEL]; ++} ++ + bool migrate_onecopy_ram(void) + { + MigrationState *s = migrate_get_current(); +@@ -507,6 +515,7 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, + MIGRATION_CAPABILITY_VALIDATE_UUID, + MIGRATION_CAPABILITY_ZERO_COPY_SEND, + MIGRATION_CAPABILITY_ONECOPY, ++ MIGRATION_CAPABILITY_DEVICES_PARALLEL, + MIGRATION_CAPABILITY_LDST); + + static bool migrate_incoming_started(void) +diff --git a/migration/options.h b/migration/options.h +index 59bfb7e854..3dcfde3031 100644 +--- a/migration/options.h ++++ b/migration/options.h +@@ -41,6 +41,7 @@ bool migrate_postcopy_ram(void); + #ifdef CONFIG_URMA_MIGRATION + bool migrate_urma(void); + #endif ++bool migrate_devices_parallel(void); + bool migrate_onecopy_ram(void); + bool migrate_rdma_pin_all(void); + bool migrate_release_ram(void); +diff --git a/qapi/migration.json b/qapi/migration.json +index 61b89a27d1..6a435f4c74 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -539,6 +539,9 @@ + # and enter to the completion phase:suspend the source VM and + # synchronize all VM states to the destination host. (Since 8.2) + # ++# @devices-parallel: If enabled, parallel save vmstate with stop cpu. ++# (Since 8.2) ++# + # Features: + # + # @deprecated: Member @block is deprecated. 
Use blockdev-mirror with +@@ -554,7 +557,7 @@ + { 'enum': 'MigrationCapability', + 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', + { 'name': 'compress', 'features': [ 'deprecated' ] }, +- 'events', 'postcopy-ram', 'onecopy', 'ldst', ++ 'events', 'postcopy-ram', 'onecopy', 'devices-parallel', 'ldst', + { 'name': 'x-colo', 'features': [ 'unstable' ] }, + 'release-ram', + { 'name': 'block', 'features': [ 'deprecated' ] }, +@@ -619,6 +622,7 @@ + # {"state": false, "capability": "postcopy-ram"}, + # {"state": false, "capability": "x-colo"}, + # {"state": false, "capability": "onecopy"}, ++# {"state": false, "capability": "devices-parallel"}, + # {"state": false, "capability": "ldst"} + # ]} + ## +-- +2.33.0 + diff --git a/Migration-support-onecopy-migration.patch b/Migration-support-onecopy-migration.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d2a063d471f68d09584ca55f61c97bf3889832f --- /dev/null +++ b/Migration-support-onecopy-migration.patch @@ -0,0 +1,247 @@ +From d3ac131f5e154568a617a0c68db40d99c9940350 Mon Sep 17 00:00:00 2001 +From: GQX <2290721782@qq.com> +Date: Mon, 17 Nov 2025 14:41:49 +0800 +Subject: [PATCH 3/5] Migration: support onecopy migration + +If onecopy is enabled, migration will skip the iteration phase +and enter to the completion phase:suspend the source vm +and copy all vm rams to the destination host. + +This can improve the UB bandwidth utilization of urma migration. +So we can enable the onecopy feature when the UB bandwidth is +very large. For example, when the UB bandwidth is 200GBps and the +expected migration downtime is less than 500ms, we can enable the +onecopy feature when the VM size is less than 80GB (some time for +device save/load). + +Signed-off-by: GQX <2290721782@qq.com> +--- + migration/migration.c | 21 ++++++++++++++++----- + migration/options.c | 9 +++++++++ + migration/options.h | 1 + + migration/ram.c | 16 ++++++++++++++++ + migration/urma.c | 43 +++++++++++++++++++++++++++++++++++++++++++ + migration/urma.h | 1 + + qapi/migration.json | 7 ++++++- + 7 files changed, 92 insertions(+), 6 deletions(-) + +diff --git a/migration/migration.c b/migration/migration.c +index 837e0471cb..10060cdb70 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -3166,6 +3166,21 @@ static bool migration_can_switchover(MigrationState *s) + return s->switchover_acked; + } + ++static bool migration_should_complete(MigrationState *s) ++{ ++#ifdef CONFIG_HAM_MIGRATION ++ if (ham_should_complete_migration(s)) { ++ return true; ++ } ++#endif ++ ++ if (migrate_onecopy_ram()) { ++ return true; ++ } ++ ++ return false; ++} ++ + /* Migration thread iteration status */ + typedef enum { + MIG_ITERATE_RESUME, /* Resume current iteration */ +@@ -3195,12 +3210,8 @@ static MigIterateState migration_iteration_run(MigrationState *s) + trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); + } + +-#ifdef CONFIG_HAM_MIGRATION + if (((!pending_size || pending_size < s->threshold_size) && can_switchover) || +- ham_should_complete_migration(s)) { +-#else +- if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { +-#endif ++ migration_should_complete(s)) { + trace_migration_thread_low_pending(pending_size); + migration_completion(s); + return MIG_ITERATE_BREAK; +diff --git a/migration/options.c b/migration/options.c +index c2695aee65..a1c29293d3 100644 +--- a/migration/options.c ++++ b/migration/options.c +@@ -220,6 +220,7 @@ Property migration_properties[] = { + 
DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD), + DEFINE_PROP_MIG_CAP("x-background-snapshot", + MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT), ++ DEFINE_PROP_MIG_CAP("x-onecopy", MIGRATION_CAPABILITY_ONECOPY), + #ifdef CONFIG_LINUX + DEFINE_PROP_MIG_CAP("x-zero-copy-send", + MIGRATION_CAPABILITY_ZERO_COPY_SEND), +@@ -345,6 +346,13 @@ bool migrate_urma(void) + } + #endif + ++bool migrate_onecopy_ram(void) ++{ ++ MigrationState *s = migrate_get_current(); ++ ++ return s->capabilities[MIGRATION_CAPABILITY_ONECOPY]; ++} ++ + bool migrate_use_ldst(void) + { + MigrationState *s = migrate_get_current(); +@@ -498,6 +506,7 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, + MIGRATION_CAPABILITY_X_COLO, + MIGRATION_CAPABILITY_VALIDATE_UUID, + MIGRATION_CAPABILITY_ZERO_COPY_SEND, ++ MIGRATION_CAPABILITY_ONECOPY, + MIGRATION_CAPABILITY_LDST); + + static bool migrate_incoming_started(void) +diff --git a/migration/options.h b/migration/options.h +index 78f4af0ac8..59bfb7e854 100644 +--- a/migration/options.h ++++ b/migration/options.h +@@ -41,6 +41,7 @@ bool migrate_postcopy_ram(void); + #ifdef CONFIG_URMA_MIGRATION + bool migrate_urma(void); + #endif ++bool migrate_onecopy_ram(void); + bool migrate_rdma_pin_all(void); + bool migrate_release_ram(void); + bool migrate_return_path(void); +diff --git a/migration/ram.c b/migration/ram.c +index ace13801d1..6422b3e4b6 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -3707,6 +3707,19 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + + rs->last_stage = !migration_in_colo_state(); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma() && migrate_onecopy_ram()) { ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); ++ ret = qemu_urma_write_all(s->urma_ctx); ++ if (ret < 0) { ++ qemu_file_set_error(f, ret); ++ return ret; ++ } ++ ++ goto finish; ++ } ++#endif ++ + WITH_RCU_READ_LOCK_GUARD() { + if (!migration_in_postcopy()) { + migration_bitmap_sync_precopy(rs, true); +@@ -3769,6 +3782,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + } + } + ++#ifdef CONFIG_URMA_MIGRATION ++finish: ++#endif + ret = multifd_send_sync_main(); + if (ret < 0) { + return ret; +diff --git a/migration/urma.c b/migration/urma.c +index 80af7d8fca..fcff8a1a42 100644 +--- a/migration/urma.c ++++ b/migration/urma.c +@@ -990,6 +990,49 @@ int qemu_flush_urma_write(URMAContext *urma) + return 0; + } + ++int qemu_urma_write_all(URMAContext *urma) ++{ ++ int i; ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ uint64_t local_addr, remote_addr, offset, length; ++ uint64_t chunk_length = 1UL << URMA_REG_CHUNK_SHIFT; ++ urma_jfs_wr_flag_t flag = { ++ .bs.complete_enable = 1 ++ }; ++ ++ for (i = 0; i < local->nb_blocks; i++) { ++ URMALocalBlock *block = &local->block[i]; ++ ++ if (!block->is_ram_block) { ++ continue; ++ } ++ ++ for (offset = 0; offset < block->length; offset += chunk_length) { ++ local_addr = (uint64_t)block->local_host_addr + offset; ++ remote_addr = (uint64_t)block->remote_seg.ubva.va + offset; ++ length = (block->length - offset) > chunk_length ? 
chunk_length : (block->length - offset); ++ ++ if (urma_write_p(urma->jfs, urma->tjfr, block->import_tseg, block->local_tseg, ++ remote_addr, local_addr, length, ++ flag, urma->rid) != URMA_SUCCESS) { ++ qemu_log("Failed to do urma_write, local addr: %lx, remote addr: %lx, size: %lx, errno: %d\n", ++ local_addr, remote_addr, length, errno); ++ return -EINVAL; ++ } ++ ++ urma->nb_polling++; ++ if (urma->nb_polling >= urma->max_jfs_depth) { ++ if (qemu_flush_urma_write(urma) < 0) { ++ qemu_log("Failed to flush urma write, errno: %d\n", errno); ++ return -EINVAL; ++ } ++ } ++ } ++ } ++ ++ return 0; ++} ++ + static int qemu_urma_write_one(URMAContext *urma, + int current_index, uint64_t current_addr, + uint64_t length) +diff --git a/migration/urma.h b/migration/urma.h +index 62cd8cb489..63b9774e3b 100644 +--- a/migration/urma.h ++++ b/migration/urma.h +@@ -150,5 +150,6 @@ int qemu_exchange_urma_info(QEMUFile *f, URMAContext *urma, bool server); + int qemu_urma_import(URMAContext *urma); + void urma_migration_cleanup(void); + void record_migration_log(MigrationState *s); ++int qemu_urma_write_all(URMAContext *urma); + + #endif +diff --git a/qapi/migration.json b/qapi/migration.json +index 12d9040620..61b89a27d1 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -535,6 +535,10 @@ + # and can result in more stable read performance. Requires KVM + # with accelerator property "dirty-ring-size" set. (Since 8.1) + # ++# @onecopy: If enabled, live migration will skip the iteration phase ++# and enter to the completion phase:suspend the source VM and ++# synchronize all VM states to the destination host. (Since 8.2) ++# + # Features: + # + # @deprecated: Member @block is deprecated. Use blockdev-mirror with +@@ -550,7 +554,7 @@ + { 'enum': 'MigrationCapability', + 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', + { 'name': 'compress', 'features': [ 'deprecated' ] }, +- 'events', 'postcopy-ram', 'ldst', ++ 'events', 'postcopy-ram', 'onecopy', 'ldst', + { 'name': 'x-colo', 'features': [ 'unstable' ] }, + 'release-ram', + { 'name': 'block', 'features': [ 'deprecated' ] }, +@@ -614,6 +618,7 @@ + # {"state": true, "capability": "events"}, + # {"state": false, "capability": "postcopy-ram"}, + # {"state": false, "capability": "x-colo"}, ++# {"state": false, "capability": "onecopy"}, + # {"state": false, "capability": "ldst"} + # ]} + ## +-- +2.33.0 + diff --git a/Migration-support-send-data-through-urma-protocol-du.patch b/Migration-support-send-data-through-urma-protocol-du.patch new file mode 100644 index 0000000000000000000000000000000000000000..c889212675e2667781966c4cf67604d729a8d4b9 --- /dev/null +++ b/Migration-support-send-data-through-urma-protocol-du.patch @@ -0,0 +1,1320 @@ +From 8673f3f731b668ed3b774683215e0680362ff10b Mon Sep 17 00:00:00 2001 +From: GQX <2290721782@qq.com> +Date: Sun, 23 Nov 2025 23:08:35 +0800 +Subject: [PATCH 2/5] Migration: support send data through urma protocol during + migration +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This patch introduces early initialization of the URMA (Ultra Reliable +Memory Access) transport context at the beginning of the migration +process, enabling efficient data transfer over URMA from the outset. + +Specifically: + +The URMA communication channel is now established during migration setup +(migration_init), ensuring resources are ready before bulk data transfer +begins. 
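+
+Roughly, the outgoing setup now performs these steps up front (condensed
+from the urma_start_outgoing_migration() implementation in this patch;
+error handling and timing bookkeeping are omitted):
+
+    urma_dlfunc_init(errp);                 /* dlopen liburma, resolve symbols */
+    urma = qemu_urma_data_init(&saddr->u.inet);         /* record host/port    */
+    qemu_urma_init_all(urma, true);         /* context, jetties, RAM registration */
+    s->urma_ctx = urma;
+    socket_start_outgoing_migration(s, saddr, errp);    /* regular stream channel */
+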
+Migration data (e.g., RAM pages, device state) can be transmitted using +URMA’s zero-copy urma_write operation, reducing CPU overhead and +improving throughput compared to traditional socket-based transmission. +This lays the groundwork for full URMA-based live migration, with +subsequent patches expected to enable features like one-copy migration +and GPU state offload. +The change maintains backward compatibility: URMA is only used when +explicitly enabled via the urma_migration build option and runtime +configuration. + +Signed-off-by: GQX <2290721782@qq.com> +--- + migration/urma.c | 1240 +++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 1229 insertions(+), 11 deletions(-) + +diff --git a/migration/urma.c b/migration/urma.c +index 8fb85e4123..80af7d8fca 100644 +--- a/migration/urma.c ++++ b/migration/urma.c +@@ -50,50 +50,1268 @@ + #include + #include "crypto/random.h" + +-int qemu_flush_urma_write(URMAContext *urma) ++#define URMA_REG_CHUNK_SHIFT 24 /* 16 MB */ ++ ++/* Do not merge data if larger than this. */ ++#define URMA_CHUNK_MERGE_MAX (1 << URMA_REG_CHUNK_SHIFT) ++ ++#define URMA_MAX_POLL_TIME 100000000 /* ms */ ++ ++#define URMA_DEV_DEFAULT_NAME "bonding_dev_0" /* default use bonding dev */ ++#define URMA_DEV_DEFAULT_IDX 0 ++ ++void *handle_urma = NULL; ++static const char *urma_dev_name = URMA_DEV_DEFAULT_NAME; ++static int urma_dev_idx = URMA_DEV_DEFAULT_IDX; ++ ++urma_status_t (*urma_init_p)(urma_init_attr_t *conf); ++urma_status_t (*urma_uninit_p)(void); ++urma_device_t **(*urma_get_device_list_p)(int *num_devices); ++void (*urma_free_device_list_p)(urma_device_t **device_list); ++urma_eid_info_t *(*urma_get_eid_list_p)(urma_device_t *dev, uint32_t *cnt); ++void (*urma_free_eid_list_p)(urma_eid_info_t *eid_list); ++urma_status_t (*urma_query_device_p)(urma_device_t *dev, urma_device_attr_t *dev_attr); ++urma_context_t *(*urma_create_context_p)(urma_device_t *dev, uint32_t eid_index); ++urma_status_t (*urma_delete_context_p)(urma_context_t *ctx); ++urma_jfc_t *(*urma_create_jfc_p)(urma_context_t *ctx, urma_jfc_cfg_t *jfc_cfg); ++urma_status_t (*urma_delete_jfc_p)(urma_jfc_t *jfc); ++urma_jfs_t *(*urma_create_jfs_p)(urma_context_t *ctx, urma_jfs_cfg_t *jfs_cfg); ++urma_status_t (*urma_delete_jfs_p)(urma_jfs_t *jfs); ++urma_jfr_t *(*urma_create_jfr_p)(urma_context_t *ctx, urma_jfr_cfg_t *jfr_cfg); ++urma_status_t (*urma_delete_jfr_p)(urma_jfr_t *jfr); ++urma_target_jetty_t *(*urma_import_jfr_p)(urma_context_t *ctx, urma_rjfr_t *rjfr, urma_token_t *token_value); ++urma_status_t (*urma_unimport_jfr_p)(urma_target_jetty_t *target_jfr); ++urma_status_t (*urma_advise_jfr_p)(urma_jfs_t *jfs, urma_target_jetty_t *tjfr); ++urma_jfce_t *(*urma_create_jfce_p)(urma_context_t *ctx); ++urma_status_t (*urma_delete_jfce_p)(urma_jfce_t *jfce); ++urma_target_seg_t *(*urma_register_seg_p)(urma_context_t *ctx, urma_seg_cfg_t *seg_cfg); ++urma_status_t (*urma_unregister_seg_p)(urma_target_seg_t *target_seg); ++urma_target_seg_t *(*urma_import_seg_p)( ++ urma_context_t *ctx, urma_seg_t *seg, urma_token_t *token_value, uint64_t addr, urma_import_seg_flag_t flag); ++urma_status_t (*urma_unimport_seg_p)(urma_target_seg_t *tseg); ++urma_status_t (*urma_write_p)(urma_jfs_t *jfs, urma_target_jetty_t *target_jfr, urma_target_seg_t *dst_tseg, ++ urma_target_seg_t *src_tseg, uint64_t dst, uint64_t src, uint32_t len, urma_jfs_wr_flag_t flag, uint64_t user_ctx); ++int (*urma_poll_jfc_p)(urma_jfc_t *jfc, int cr_cnt, urma_cr_t *cr); ++urma_status_t (*urma_user_ctl_p)(urma_context_t *ctx, 
urma_user_ctl_in_t *in, urma_user_ctl_out_t *out); ++urma_status_t (*urma_set_context_opt_p)(urma_context_t *ctx, urma_opt_name_t opt_name, ++ const void *opt_value, size_t opt_len); ++ ++typedef struct dl_functions { ++ const char *func_name; ++ void **func; ++} dl_functions; ++ ++dl_functions urma_dlfunc_list[] = { ++ {.func_name = "urma_init", .func = (void **)&urma_init_p}, ++ {.func_name = "urma_uninit", .func = (void **)&urma_uninit_p}, ++ {.func_name = "urma_get_device_list", .func = (void **)&urma_get_device_list_p}, ++ {.func_name = "urma_free_device_list", .func = (void **)&urma_free_device_list_p}, ++ {.func_name = "urma_get_eid_list", .func = (void **)&urma_get_eid_list_p}, ++ {.func_name = "urma_free_eid_list", .func = (void **)&urma_free_eid_list_p}, ++ {.func_name = "urma_query_device", .func = (void **)&urma_query_device_p}, ++ {.func_name = "urma_create_context", .func = (void **)&urma_create_context_p}, ++ {.func_name = "urma_delete_context", .func = (void **)&urma_delete_context_p}, ++ {.func_name = "urma_create_jfc", .func = (void **)&urma_create_jfc_p}, ++ {.func_name = "urma_delete_jfc", .func = (void **)&urma_delete_jfc_p}, ++ {.func_name = "urma_create_jfs", .func = (void **)&urma_create_jfs_p}, ++ {.func_name = "urma_delete_jfs", .func = (void **)&urma_delete_jfs_p}, ++ {.func_name = "urma_create_jfr", .func = (void **)&urma_create_jfr_p}, ++ {.func_name = "urma_delete_jfr", .func = (void **)&urma_delete_jfr_p}, ++ {.func_name = "urma_import_jfr", .func = (void **)&urma_import_jfr_p}, ++ {.func_name = "urma_unimport_jfr", .func = (void **)&urma_unimport_jfr_p}, ++ {.func_name = "urma_advise_jfr", .func = (void **)&urma_advise_jfr_p}, ++ {.func_name = "urma_create_jfce", .func = (void **)&urma_create_jfce_p}, ++ {.func_name = "urma_delete_jfce", .func = (void **)&urma_delete_jfce_p}, ++ {.func_name = "urma_register_seg", .func = (void **)&urma_register_seg_p}, ++ {.func_name = "urma_unregister_seg", .func = (void **)&urma_unregister_seg_p}, ++ {.func_name = "urma_import_seg", .func = (void **)&urma_import_seg_p}, ++ {.func_name = "urma_unimport_seg", .func = (void **)&urma_unimport_seg_p}, ++ {.func_name = "urma_write", .func = (void **)&urma_write_p}, ++ {.func_name = "urma_poll_jfc", .func = (void **)&urma_poll_jfc_p}, ++ {.func_name = "urma_user_ctl", .func = (void **)&urma_user_ctl_p}, ++ {.func_name = "urma_set_context_opt", .func = (void **)&urma_set_context_opt_p}, ++}; ++ ++static void urma_dlfunc_list_set_null(void) ++{ ++ for (int i = 0; i < ARRAY_SIZE(urma_dlfunc_list); i++) { ++ *urma_dlfunc_list[i].func = NULL; ++ } ++} ++ ++static void urma_dlfunc_close(void) ++{ ++ if (handle_urma) { ++ (void)dlclose(handle_urma); ++ handle_urma = NULL; ++ } ++ urma_dlfunc_list_set_null(); ++} ++ ++static int migrate_get_urma_dlfunc(Error **errp) ++{ ++ char *error = NULL; ++ ++ urma_dlfunc_list_set_null(); ++ handle_urma = dlopen(URMA_SO_PATH, RTLD_LAZY | RTLD_GLOBAL); ++ if (!handle_urma) { ++ qemu_log("dlopen error: %s", dlerror()); ++ return -1; ++ } ++ ++ for (int i = 0; i < ARRAY_SIZE(urma_dlfunc_list); i++) { ++ *urma_dlfunc_list[i].func = dlsym(handle_urma, urma_dlfunc_list[i].func_name); ++ if ((error = dlerror()) != NULL) { ++ qemu_log("dlsym error: %s while getting %s", error, urma_dlfunc_list[i].func_name); ++ urma_dlfunc_close(); ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++static int urma_dlfunc_init(Error **errp) ++{ ++ int r; ++ ++ r = migrate_get_urma_dlfunc(errp); ++ if (r < 0) { ++ qemu_log("dlsym error, open urma dlfunc failed\n"); ++ return r; 
++ } ++ ++ return r; ++} ++ ++static inline uint64_t urma_ram_chunk_index(const uint8_t *start, ++ const uint8_t *host) ++{ ++ return ((uintptr_t) host - (uintptr_t) start) >> URMA_REG_CHUNK_SHIFT; ++} ++ ++static inline uint8_t *urma_ram_chunk_start(const URMALocalBlock *ram_block, ++ uint64_t i) ++{ ++ return (uint8_t *)(uintptr_t)(ram_block->local_host_addr + ++ (i << URMA_REG_CHUNK_SHIFT)); ++} ++ ++static inline uint8_t *urma_ram_chunk_end(const URMALocalBlock *ram_block, ++ uint64_t i) ++{ ++ uint8_t *result = urma_ram_chunk_start(ram_block, i) + ++ (1UL << URMA_REG_CHUNK_SHIFT); ++ ++ if (result > (ram_block->local_host_addr + ram_block->length)) { ++ result = ram_block->local_host_addr + ram_block->length; ++ } ++ ++ return result; ++} ++ ++static void urma_add_block(URMAContext *urma, const char *block_name, ++ void *host_addr, ++ ram_addr_t block_offset, uint64_t length) ++{ ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ URMALocalBlock *block; ++ URMALocalBlock *old = local->block; ++ ++ local->block = g_new0(URMALocalBlock, local->nb_blocks + 1); ++ ++ if (local->nb_blocks) { ++ int x; ++ if (urma->blockmap) { ++ for (x = 0; x < local->nb_blocks; x++) { ++ g_hash_table_remove(urma->blockmap, ++ (void *)(uintptr_t)old[x].offset); ++ g_hash_table_insert(urma->blockmap, ++ (void *)(uintptr_t)old[x].offset, ++ &local->block[x]); ++ } ++ } ++ memcpy(local->block, old, sizeof(URMALocalBlock) * local->nb_blocks); ++ g_free(old); ++ } ++ ++ block = &local->block[local->nb_blocks]; ++ ++ block->block_name = g_strdup(block_name); ++ block->local_host_addr = host_addr; ++ block->offset = block_offset; ++ block->length = length; ++ block->index = local->nb_blocks; ++ block->src_index = ~0U; /* Filled in by the receipt of the block list */ ++ block->nb_chunks = urma_ram_chunk_index(host_addr, host_addr + length) + 1UL; ++ block->transit_bitmap = bitmap_new(block->nb_chunks); ++ bitmap_clear(block->transit_bitmap, 0, block->nb_chunks); ++ block->unregister_bitmap = bitmap_new(block->nb_chunks); ++ bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks); ++ ++ block->is_ram_block = local->init ? 
false : true; ++ ++ if (urma->blockmap) { ++ g_hash_table_insert(urma->blockmap, (void *)(uintptr_t)block_offset, block); ++ } ++ ++ local->nb_blocks++; ++} ++ ++static int qemu_urma_init_one_block(RAMBlock *rb, void *opaque) ++{ ++ const char *block_name = qemu_ram_get_idstr(rb); ++ void *host_addr = qemu_ram_get_host_addr(rb); ++ ram_addr_t block_offset = qemu_ram_get_offset(rb); ++ ram_addr_t length = qemu_ram_get_used_length(rb); ++ urma_add_block(opaque, block_name, host_addr, block_offset, length); ++ return 0; ++} ++ ++static void qemu_urma_free_blocks(URMAContext *urma) ++{ ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ int i; ++ ++ for (i = 0; i < local->nb_blocks; i++) { ++ URMALocalBlock *block = &local->block[i]; ++ ++ if (urma->blockmap) { ++ g_hash_table_remove(urma->blockmap, (void *)(uintptr_t)block->offset); ++ } ++ ++ g_free(block->transit_bitmap); ++ block->transit_bitmap = NULL; ++ ++ g_free(block->unregister_bitmap); ++ block->unregister_bitmap = NULL; ++ ++ g_free(block->block_name); ++ block->block_name = NULL; ++ } ++ ++ g_free(local->block); ++ local->block = NULL; ++ local->nb_blocks = 0; ++ ++ g_free(urma->dest_blocks); ++ urma->dest_blocks = NULL; ++} ++ ++static int qemu_urma_init_ram_blocks(URMAContext *urma) ++{ ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ int ret; ++ ++ if (urma->blockmap != NULL) { ++ qemu_log("Ram blocks have been inited before! blockmap is %p\n", urma->blockmap); ++ return -EINVAL; ++ } ++ ++ memset(local, 0, sizeof *local); ++ ret = foreach_not_ignored_block(qemu_urma_init_one_block, urma); ++ if (ret) { ++ qemu_log("do qemu_urma_init_one_block failed, %d\n", ret); ++ return ret; ++ } ++ ++ urma->dest_blocks = g_new0(URMADestBlock, ++ urma->local_ram_blocks.nb_blocks); ++ local->init = true; ++ ++ /* Build the hash that maps from offset to RAMBlock */ ++ urma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal); ++ for (int i = 0; i < urma->local_ram_blocks.nb_blocks; i++) { ++ g_hash_table_insert(urma->blockmap, ++ (void *)(uintptr_t)urma->local_ram_blocks.block[i].offset, ++ &urma->local_ram_blocks.block[i]); ++ } ++ ++ return 0; ++} ++ ++static void qemu_urma_data_free(URMAContext *urma) ++{ ++ if (urma == NULL) { ++ return; ++ } ++ ++ g_free(urma->host); ++ g_free(urma); ++} ++ ++static URMAContext *qemu_urma_data_init(InetSocketAddress *saddr) ++{ ++ URMAContext *urma = NULL; ++ ++ urma = g_new0(URMAContext, 1); ++ urma->current_index = -1; ++ urma->current_chunk = -1; ++ ++ urma->host = g_strdup(saddr->host); ++ urma->port = atoi(saddr->port); ++ ++ return urma; ++} ++ ++static int qemu_get_urma_eid_index(urma_device_t *dev) ++{ ++ urma_eid_info_t *eid_list; ++ uint32_t eid_cnt; ++ int i, eid_index = -1; ++ ++ eid_list = urma_get_eid_list_p(dev, &eid_cnt); ++ if (eid_list == NULL) { ++ return -1; ++ } ++ ++ for (i = 0; eid_list != NULL && i < eid_cnt; i++) { ++ qemu_log("device_name :%s (eid%d: "EID_FMT").\n", dev->name, eid_list[i].eid_index, EID_ARGS(eid_list[i].eid)); ++ } ++ ++ if (eid_cnt > 0) { ++ if (urma_dev_idx >= 0 && urma_dev_idx < eid_cnt) { ++ eid_index = eid_list[urma_dev_idx].eid_index; ++ } else { ++ qemu_log("Invalid urma_dev_idx, use the first one.\n"); ++ eid_index = eid_list[0].eid_index; ++ } ++ ++ qemu_log("Use the eid%d: "EID_FMT".\n", eid_index, EID_ARGS(eid_list[eid_index].eid)); ++ } ++ ++ urma_free_eid_list_p(eid_list); ++ return eid_index; ++} ++ ++static urma_device_t *qemu_get_urma_device(URMAContext *ctx) ++{ ++ int i, device_num = 0; ++ urma_device_t *urma_dev = NULL; ++ 
urma_device_t **device_list = urma_get_device_list_p(&device_num); ++ ++ if (device_list == NULL || device_num == 0) { ++ qemu_log("Failed to get device list, errno: %d\n", errno); ++ return NULL; ++ } ++ ++ for (i = 0; i < device_num; i++) { ++ if (urma_dev_name != NULL && strcmp(device_list[i]->name, urma_dev_name) == 0) { ++ urma_dev = device_list[i]; ++ break; ++ } ++ } ++ ++ /* If the specified device cannot be found, use the first device */ ++ if (urma_dev == NULL) { ++ qemu_log("Cannot find the device %s, use the first device\n", urma_dev_name); ++ urma_dev = device_list[0]; ++ } ++ ++ urma_free_device_list_p(device_list); ++ return urma_dev; ++} ++ ++static int qemu_get_random_u32(uint32_t *rand_value) + { +- /* TODO */ ++ char random_char[URMA_TOKEN_LEN / CHAR_BIT]; ++ Error *local_err = NULL; ++ ++ if (qcrypto_random_bytes(random_char, sizeof(random_char), &local_err)) { ++ qemu_log("cannot get qcrypto random bytes, %s\n", error_get_pretty(local_err)); ++ error_free(local_err); ++ return -EINVAL; ++ } ++ ++ memcpy(rand_value, random_char, sizeof(uint32_t)); ++ ++ return 0; ++} ++ ++static int qemu_urma_init_context(URMAContext *ctx) ++{ ++ int eid_index, ret; ++ urma_context_aggr_mode_t aggr_mode = URMA_AGGR_MODE_BALANCE; ++ ++ ctx->event_mode = false; ++ ++ urma_device_t *urma_dev = qemu_get_urma_device(ctx); ++ if (urma_dev == NULL) { ++ qemu_log("URMA: urma get device failed, errno: %d\n", errno); ++ return -EINVAL; ++ } ++ ++ ret = urma_query_device_p(urma_dev, &ctx->dev_attr); ++ if (ret) { ++ qemu_log("URMA: Failed to query device %s, ret: %d, errno: %d\n", urma_dev->name, ret, errno); ++ return ret; ++ } ++ ++ eid_index = qemu_get_urma_eid_index(urma_dev); ++ if (eid_index < 0) { ++ qemu_log("URMA: Failed to get eid index, ret: %d, errno: %d.\n", eid_index, errno); ++ return eid_index; ++ } ++ ++ ctx->urma_ctx = urma_create_context_p(urma_dev, (uint32_t)eid_index); ++ if (ctx->urma_ctx == NULL) { ++ qemu_log("URMA: Failed to create instance with eid: %d, errno: %d.\n", eid_index, errno); ++ return -EINVAL; ++ } ++ ++ ret = urma_set_context_opt_p(ctx->urma_ctx, URMA_OPT_AGGR_MODE, &aggr_mode, sizeof(aggr_mode)); ++ if (ret) { ++ qemu_log("URMA: Failed to do urma_set_context_opt, ret: %d, errno: %d\n", ret, errno); ++ } ++ ++ ctx->jfce = urma_create_jfce_p(ctx->urma_ctx); ++ if (ctx->jfce == NULL) { ++ qemu_log("URMA: Failed to create jfce, errno: %d.\n", errno); ++ goto err_del_ctx; ++ } ++ ++ urma_jfc_cfg_t jfc_cfg = { ++ .depth = ctx->dev_attr.dev_cap.max_jfc_depth, ++ .flag = {.value = 0}, ++ .jfce = ctx->jfce, ++ .user_ctx = (uint64_t)NULL, ++ }; ++ ctx->jfc = urma_create_jfc_p(ctx->urma_ctx, &jfc_cfg); ++ if (ctx->jfc == NULL) { ++ qemu_log("URMA: Failed to create jfc, errno: %d\n", errno); ++ goto err_del_jfce; ++ } ++ ++ urma_jfs_cfg_t jfs_cfg = { ++ .depth = ctx->dev_attr.dev_cap.max_jfs_depth, ++ .trans_mode = URMA_TM_RM, ++ .priority = URMA_MAX_PRIORITY, /* Highest priority */ ++ .max_sge = 1, ++ .max_inline_data = 0, ++ .rnr_retry = URMA_TYPICAL_RNR_RETRY, ++ .err_timeout = URMA_TYPICAL_ERR_TIMEOUT, ++ .jfc = ctx->jfc, ++ .flag.bs.multi_path = 1, ++ .user_ctx = (uint64_t)NULL ++ }; ++ ctx->jfs = urma_create_jfs_p(ctx->urma_ctx, &jfs_cfg); ++ if (ctx->jfs == NULL) { ++ qemu_log("URMA: Failed to create jfs, errno: %d\n", errno); ++ goto err_del_jfc; ++ } ++ ++ ctx->max_jfs_depth = ctx->dev_attr.dev_cap.max_jfs_depth; ++ ++ if (qemu_get_random_u32(&ctx->jfr_token.token) < 0) { ++ qemu_log("get jfr random token failed, errno: %d\n", errno); ++ goto err_del_jfs; ++ } 
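++
++    /*
++     * The JFR created just below is the receive-side queue: it uses the
++     * device's maximum JFR depth, and the random token generated above is
++     * what the remote side must present when importing this JFR.
++     */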
++ ++ urma_jfr_cfg_t jfr_cfg = { ++ .depth = ctx->dev_attr.dev_cap.max_jfr_depth, ++ .max_sge = 1, ++ .flag.bs.tag_matching = URMA_NO_TAG_MATCHING, ++ .trans_mode = URMA_TM_RM, ++ .min_rnr_timer = URMA_TYPICAL_MIN_RNR_TIMER, ++ .jfc = ctx->jfc, ++ .token_value = ctx->jfr_token, ++ .id = 0 ++ }; ++ ctx->jfr = urma_create_jfr_p(ctx->urma_ctx, &jfr_cfg); ++ if (ctx->jfr == NULL) { ++ qemu_log("Failed to create jfr, errno: %d\n", errno); ++ goto err_del_jfs; ++ } ++ ++ qemu_log("init urma context success.\n"); ++ return 0; ++ ++err_del_jfs: ++ urma_delete_jfs_p(ctx->jfs); ++err_del_jfc: ++ urma_delete_jfc_p(ctx->jfc); ++err_del_jfce: ++ urma_delete_jfce_p(ctx->jfce); ++err_del_ctx: ++ (void)urma_delete_context_p(ctx->urma_ctx); ++ + return -EINVAL; + } + ++static void qemu_urma_cleanup_context(URMAContext *ctx) ++{ ++ if (!ctx) { ++ return; ++ } ++ ++ if (ctx->tjfr) { ++ urma_unimport_jfr_p(ctx->tjfr); ++ ctx->tjfr = NULL; ++ } ++ ++ if (ctx->jfr) { ++ urma_delete_jfr_p(ctx->jfr); ++ ctx->jfr = NULL; ++ } ++ ++ if (ctx->jfs) { ++ urma_delete_jfs_p(ctx->jfs); ++ ctx->jfs = NULL; ++ } ++ ++ if (ctx->jfc) { ++ urma_delete_jfc_p(ctx->jfc); ++ ctx->jfc = NULL; ++ } ++ ++ if (ctx->jfce) { ++ urma_delete_jfce_p(ctx->jfce); ++ ctx->jfce = NULL; ++ } ++ ++ if (ctx->urma_ctx) { ++ (void)urma_delete_context_p(ctx->urma_ctx); ++ ctx->urma_ctx = NULL; ++ } ++ ++ qemu_log("clean up urma context success.\n"); ++} ++ ++static int urma_init_lib(void) ++{ ++ int ret; ++ urma_init_attr_t init_attr = { ++ .uasid = 0, ++ }; ++ ++ ret = urma_init_p(&init_attr); ++ if (ret != URMA_SUCCESS) { ++ qemu_log("URMA: urma_init failed, ret: %d, errno: %d\n", ret, errno); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void qemu_urma_unreg_ram_blocks(URMAContext *urma) ++{ ++ int i; ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ ++ for (i = 0; i < local->nb_blocks; i++) { ++ URMALocalBlock *block = &local->block[i]; ++ ++ if (block->local_tseg) { ++ urma_unregister_seg_p(block->local_tseg); ++ block->local_tseg = NULL; ++ } ++ } ++ ++ ram_block_discard_disable(false); ++ ++ qemu_log("unreg all ram blocks success.\n"); ++} ++ ++static int qemu_urma_reg_whole_ram_blocks(URMAContext *urma) ++{ ++ int i; ++ int64_t start_time; ++ URMALocalBlocks *local = &urma->local_ram_blocks; ++ MigrationState *s = migrate_get_current(); ++ urma_reg_seg_flag_t flag = { ++ .bs.token_policy = URMA_TOKEN_PLAIN_TEXT, ++ .bs.cacheable = URMA_NON_CACHEABLE, ++ .bs.reserved = 0 ++ }; ++ ++ if (!urma->is_incoming) { ++ flag.bs.access = URMA_ACCESS_LOCAL_ONLY; ++ } else { ++ flag.bs.access = URMA_ACCESS_READ | URMA_ACCESS_WRITE | URMA_ACCESS_ATOMIC; ++ } ++ ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); ++ ++ /* disable memory ballon before register seg */ ++ ram_block_discard_disable(true); ++ ++ for (i = 0; i < local->nb_blocks; i++) { ++ URMALocalBlock *block = &local->block[i]; ++ if (qemu_get_random_u32(&block->local_seg_token.token) < 0) { ++ qemu_log("get segment random token failed, errno: %d\n", errno); ++ goto err; ++ } ++ ++ urma_seg_cfg_t seg_cfg = { ++ .va = (uint64_t)block->local_host_addr, ++ .len = block->length, ++ .token_value = block->local_seg_token, ++ .flag = flag, ++ .user_ctx = (uintptr_t)NULL, ++ .iova = 0 ++ }; ++ ++ block->local_tseg = urma_register_seg_p(urma->urma_ctx, &seg_cfg); ++ if (block->local_tseg == NULL) { ++ qemu_log("URMA: Failed to register RAM block: %s, va: %p, size: %ld\n", block->block_name, block->local_host_addr, block->length); ++ goto err; ++ } ++ } ++ ++ qemu_log("reigster all ram 
blocks success.\n"); ++ s->ram_reg_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time; ++ ++ return 0; ++ ++err: ++ qemu_urma_unreg_ram_blocks(urma); ++ return -EINVAL; ++} ++ ++static void qemu_urma_cleanup(URMAContext *urma) ++{ ++ if (urma == NULL) { ++ return; ++ } ++ ++ qemu_urma_unreg_ram_blocks(urma); ++ qemu_urma_free_blocks(urma); ++ qemu_urma_cleanup_context(urma); ++ ++ urma_uninit_p(); ++ qemu_log("clean up urma info success.\n"); ++} ++ ++static int qemu_urma_init_all(URMAContext *urma, bool pin_all) ++{ ++ int ret; ++ ++ urma->pin_all = pin_all; ++ urma->nb_polling = 0; ++ ++ ret = urma_init_lib(); ++ if (ret) { ++ goto err; ++ } ++ ++ ret = qemu_urma_init_context(urma); ++ if (ret) { ++ goto err; ++ } ++ ++ ret = qemu_urma_init_ram_blocks(urma); ++ if (ret) { ++ goto err; ++ } ++ ++ if (urma->pin_all) { ++ ret = qemu_urma_reg_whole_ram_blocks(urma); ++ if (ret) { ++ goto err; ++ } ++ } ++ ++ qemu_log("prepare all urma info success.\n"); ++ return 0; ++err: ++ qemu_log("Get error during prepare urma info, ret: %d, errno: %d\n", ret, errno); ++ qemu_urma_cleanup(urma); ++ return ret; ++} ++ ++static int get_ubbond_seg_info(urma_target_seg_t *tseg, urma_bond_seg_info_out_t *seg_info_out) ++{ ++ urma_bond_seg_info_in_t seg_info_in = { ++ .tseg = tseg, ++ }; ++ urma_user_ctl_in_t user_ctl_in = { ++ .opcode = URMA_USER_CTL_BOND_GET_SEG_INFO, ++ .addr = (uint64_t)&seg_info_in, ++ .len = sizeof(urma_bond_seg_info_in_t), ++ }; ++ urma_user_ctl_out_t user_ctl_out = { ++ .addr = (uint64_t)seg_info_out, ++ .len = sizeof(urma_bond_seg_info_out_t), ++ }; ++ ++ if (urma_user_ctl_p(tseg->urma_ctx, &user_ctl_in, &user_ctl_out)) { ++ qemu_log("urma_user_ctl: get seg info failed, errno: %d\n", errno); ++ return -1; ++ } ++ ++ qemu_log("get ubbond seg info success.\n"); ++ return 0; ++} ++ ++static int add_ubbond_seg_info(urma_context_t *ctx, urma_bond_add_remote_seg_info_in_t *rseg_info_in) ++{ ++ urma_user_ctl_in_t user_ctl_in = { ++ .opcode = URMA_USER_CTL_BOND_ADD_REMOTE_SEG_INFO, ++ .addr = (uint64_t)rseg_info_in, ++ .len = sizeof(urma_bond_add_remote_seg_info_in_t) ++ }; ++ urma_user_ctl_out_t user_ctl_out = {0}; ++ ++ if (urma_user_ctl_p(ctx, &user_ctl_in, &user_ctl_out)) { ++ qemu_log("urma_user_ctl: set seg info failed, errno: %d\n", errno); ++ return -1; ++ } ++ ++ qemu_log("add ubbond seg info success.\n"); ++ return 0; ++} ++ ++static int get_ubbond_jfr_info(urma_jfr_t *jfr, urma_bond_id_info_out_t *info_out) ++{ ++ urma_bond_id_info_in_t in = { ++ .jfr = jfr, ++ .type = URMA_JFR, ++ }; ++ urma_user_ctl_in_t user_ctl_in = { ++ .opcode = URMA_USER_CTL_BOND_GET_ID_INFO, ++ .addr = (uint64_t)&in, ++ .len = sizeof(urma_bond_id_info_in_t), ++ }; ++ urma_user_ctl_out_t user_ctl_out = { ++ .addr = (uint64_t)info_out, ++ .len = sizeof(urma_bond_id_info_out_t), ++ }; ++ ++ if (urma_user_ctl_p(jfr->urma_ctx, &user_ctl_in, &user_ctl_out)) { ++ qemu_log("urma_user_ctl: get jfr info failed, errno: %d\n", errno); ++ return -1; ++ } ++ ++ qemu_log("get ubbond jfr info success.\n"); ++ return 0; ++} ++ ++static int add_ubbond_jfr_info(urma_context_t *ctx, urma_bond_id_info_out_t *info) ++{ ++ urma_user_ctl_in_t user_ctl_in = { ++ .opcode = URMA_USER_CTL_BOND_ADD_RJFR_ID_INFO, ++ .addr = (uint64_t)info, ++ .len = sizeof(urma_bond_id_info_out_t), ++ }; ++ urma_user_ctl_out_t user_ctl_out = { ++ .addr = 0, ++ .len = 0, ++ }; ++ ++ if (urma_user_ctl_p(ctx, &user_ctl_in, &user_ctl_out)) { ++ qemu_log("urma_user_ctl: set jfr info failed, errno: %d\n", errno); ++ return -1; ++ } ++ ++ 
qemu_log("add ubbond jfr info success.\n"); ++ return 0; ++} ++ ++static void pack_seg_jfr_info(seg_jfr_info_t *info, URMAContext *ctx, URMALocalBlock *block) ++{ ++ urma_bond_seg_info_out_t seg_bond_info; ++ urma_bond_id_info_out_t jfr_bond_info; ++ ++ (void)memset(info, 0, sizeof(seg_jfr_info_t)); ++ info->eid = ctx->urma_ctx->eid; ++ info->uasid = ctx->urma_ctx->uasid; ++ info->seg_va = block->local_tseg->seg.ubva.va; ++ info->seg_len = block->local_tseg->seg.len; ++ info->seg_flag = block->local_tseg->seg.attr.value; ++ info->seg_token_id = block->local_tseg->seg.token_id; ++ info->seg_token.token = block->local_seg_token.token; ++ info->jfr_id = ctx->jfr->jfr_id; ++ info->jfr_token.token = ctx->jfr_token.token; ++ ++ get_ubbond_seg_info(block->local_tseg, &seg_bond_info); ++ get_ubbond_jfr_info(ctx->jfr, &jfr_bond_info); ++ memcpy(&info->seg_bond_info, &seg_bond_info, sizeof(urma_bond_seg_info_out_t)); ++ memcpy(&info->jfr_bond_info, &jfr_bond_info, sizeof(urma_bond_id_info_out_t)); ++} ++ ++static void unpack_seg_jfr_info(seg_jfr_info_t *info, URMAContext *ctx, URMALocalBlock *block) ++{ ++ urma_bond_seg_info_out_t seg_bond_info; ++ urma_bond_id_info_out_t jfr_bond_info; ++ ++ block->remote_seg.ubva.eid = info->eid; ++ block->remote_seg.ubva.uasid = info->uasid; ++ block->remote_seg.ubva.va = info->seg_va; ++ block->remote_seg.len = info->seg_len; ++ block->remote_seg.attr.value = info->seg_flag; ++ block->remote_seg.token_id = info->seg_token_id; ++ block->remote_seg_token.token = info->seg_token.token; ++ ctx->remote_jfr_id = info->jfr_id; ++ ctx->rjfr_token.token = info->jfr_token.token; ++ ++ memcpy(&seg_bond_info, &info->seg_bond_info, sizeof(urma_bond_seg_info_out_t)); ++ memcpy(&jfr_bond_info, &info->jfr_bond_info, sizeof(urma_bond_id_info_out_t)); ++ add_ubbond_seg_info(ctx->urma_ctx, &seg_bond_info); ++ add_ubbond_jfr_info(ctx->urma_ctx, &jfr_bond_info); ++} ++ ++static urma_target_jetty_t *qemu_import_jfr(URMAContext *ctx) ++{ ++ urma_rjfr_t remote_jfr = { ++ .jfr_id = ctx->remote_jfr_id, ++ .trans_mode = URMA_TM_RM ++ }; ++ urma_target_jetty_t *tjfr = urma_import_jfr_p(ctx->urma_ctx, &remote_jfr, &ctx->rjfr_token); ++ if (tjfr == NULL) { ++ qemu_log("Failed to do urma_import_jfr, errno: %d\n", errno); ++ return NULL; ++ } ++ ++ if (urma_advise_jfr_p(ctx->jfs, tjfr) != URMA_SUCCESS) { ++ qemu_log("Failed to advise jfr, errno: %d\n", errno); ++ (void)urma_unimport_jfr_p(tjfr); ++ return NULL; ++ } ++ ++ return tjfr; ++} ++ ++static void qemu_urma_search_ram_block(URMAContext *urma, ++ uintptr_t block_offset, ++ uint64_t offset, ++ uint64_t length, ++ uint64_t *block_index, ++ uint64_t *chunk_index) ++{ ++ uint64_t current_addr = block_offset + offset; ++ URMALocalBlock *block = g_hash_table_lookup(urma->blockmap, ++ (void *) block_offset); ++ assert(block); ++ assert(current_addr >= block->offset); ++ assert((current_addr + length) <= (block->offset + block->length)); ++ ++ *block_index = block->index; ++ *chunk_index = urma_ram_chunk_index(block->local_host_addr, ++ block->local_host_addr + (current_addr - block->offset)); ++} ++ ++static inline int qemu_urma_buffer_mergable(URMAContext *urma, ++ uint64_t offset, uint64_t len) ++{ ++ URMALocalBlock *block; ++ uint8_t *host_addr; ++ uint8_t *chunk_end; ++ ++ if (urma->current_index < 0) { ++ return 0; ++ } ++ ++ if (urma->current_chunk < 0) { ++ return 0; ++ } ++ ++ block = &(urma->local_ram_blocks.block[urma->current_index]); ++ host_addr = block->local_host_addr + (offset - block->offset); ++ chunk_end = 
urma_ram_chunk_end(block, urma->current_chunk); ++ ++ if (urma->current_length == 0) { ++ return 0; ++ } ++ ++ /* ++ * Only merge into chunk sequentially. ++ */ ++ if (offset != (urma->current_addr + urma->current_length)) { ++ return 0; ++ } ++ ++ if (offset < block->offset) { ++ return 0; ++ } ++ ++ if ((offset + len) > (block->offset + block->length)) { ++ return 0; ++ } ++ ++ if ((host_addr + len) > chunk_end) { ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static int poll_jfc_wait(URMAContext *ctx, urma_cr_t *cr) ++{ ++ int i, j = 0, cnt = 0; ++ ++ for (i = 0; i < URMA_MAX_POLL_TIME; i++) { ++ if (ctx->nb_polling == 0) { ++ return 0; ++ } ++ ++ cnt = urma_poll_jfc_p(ctx->jfc, ctx->nb_polling, cr); ++ if (cnt < 0) { ++ goto err; ++ } else if (cnt > 0) { ++ for (j = 0; j < cnt; j++) { ++ if (cr[j].status != URMA_CR_SUCCESS) { ++ goto err; ++ } ++ } ++ ctx->nb_polling -= cnt; ++ } ++ ++ usleep(1); ++ } ++ ++err: ++ qemu_log("urma_poll_jfc err: loop num: %d, cnt: %d, status: %d, nb_polling: %d, errno: %d\n", ++ i, cnt, cr[j].status, ctx->nb_polling, errno); ++ return -EINVAL; ++} ++ ++int qemu_flush_urma_write(URMAContext *urma) ++{ ++ urma_cr_t *cr = NULL; ++ ++ if (!urma) { ++ qemu_log("enter qemu_flush_urma_write when the urma is uninitialized!\n"); ++ return -EINVAL; ++ } ++ ++ cr = g_new0(urma_cr_t, urma->max_jfs_depth); ++ if (cr == NULL) { ++ qemu_log("Failed to alloc urma cr\n"); ++ return -EINVAL; ++ } ++ ++ if (poll_jfc_wait(urma, cr) != 0) { ++ qemu_log("Failed to poll jfc, errno: %d\n", errno); ++ g_free(cr); ++ return -EINVAL; ++ } ++ ++ g_free(cr); ++ return 0; ++} ++ ++static int qemu_urma_write_one(URMAContext *urma, ++ int current_index, uint64_t current_addr, ++ uint64_t length) ++{ ++ uintptr_t local_addr, remote_addr, offset; ++ URMALocalBlock *block = &(urma->local_ram_blocks.block[current_index]); ++ urma_jfs_wr_flag_t flag = { 0 }; ++ ++ if (block->is_ram_block) { ++ offset = current_addr - block->offset; ++ local_addr = (uintptr_t)(block->local_host_addr + offset); ++ remote_addr = (uintptr_t)(block->remote_seg.ubva.va + offset); ++ flag.bs.complete_enable = 1; ++ ++ if (urma_write_p(urma->jfs, urma->tjfr, block->import_tseg, block->local_tseg, ++ remote_addr, local_addr, length, ++ flag, (uintptr_t)urma->rid) != URMA_SUCCESS) { ++ qemu_log("Failed to do urma_write, local addr: %lx, remote addr: %lx, size: %lx, errno: %d\n", ++ local_addr, remote_addr, length, errno); ++ return -EINVAL; ++ } ++ ++ urma->nb_polling++; ++ if (urma->nb_polling >= urma->max_jfs_depth) { ++ if (qemu_flush_urma_write(urma) < 0) { ++ qemu_log("Failed to flush urma write, errno: %d\n", errno); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ stat64_add(&mig_stats.normal_pages, length / qemu_target_page_size()); ++ stat64_add(&mig_stats.urma_bytes, length); ++ ram_transferred_add(length); ++ ++ return 0; ++} ++ ++static int qemu_urma_write_flush(URMAContext *urma) ++{ ++ int ret; ++ ++ if (!urma->current_length) { ++ return 0; ++ } ++ ++ ret = qemu_urma_write_one(urma, urma->current_index, urma->current_addr, ++ urma->current_length); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ urma->nb_sent++; ++ urma->current_length = 0; ++ urma->current_addr = 0; ++ ++ return 0; ++} ++ ++static int qemu_urma_write(URMAContext *urma, ++ uint64_t block_offset, uint64_t offset, ++ uint64_t len) ++{ ++ uint64_t current_addr = block_offset + offset; ++ uint64_t index = urma->current_index; ++ uint64_t chunk = urma->current_chunk; ++ int ret; ++ ++ /* If we cannot merge it, we flush the current buffer first. 
*/ ++ if (!qemu_urma_buffer_mergable(urma, current_addr, len)) { ++ ret = qemu_urma_write_flush(urma); ++ if (ret) { ++ return ret; ++ } ++ urma->current_length = 0; ++ urma->current_addr = current_addr; ++ qemu_urma_search_ram_block(urma, block_offset, ++ offset, len, &index, &chunk); ++ urma->current_index = index; ++ urma->current_chunk = chunk; ++ } ++ ++ /* merge it */ ++ urma->current_length += len; ++ ++ /* flush it if buffer is too large */ ++ if (urma->current_length >= URMA_CHUNK_MERGE_MAX) { ++ return qemu_urma_write_flush(urma); ++ } ++ ++ return 0; ++} ++ ++static int qemu_urma_save_page(QEMUFile *f, ram_addr_t block_offset, ++ ram_addr_t offset, size_t size) ++{ ++ MigrationState *s = migrate_get_current(); ++ URMAContext *urma = s->urma_ctx; ++ int ret; ++ ++ if (!urma) { ++ return -EINVAL; ++ } ++ ++ if (size > 0) { ++ /* ++ * Add this page to the current 'chunk'. If the chunk ++ * is full, or the page doesn't belong to the current chunk, ++ * an actual urma write will occur and a new chunk will be formed. ++ */ ++ ret = qemu_urma_write(urma, block_offset, offset, size); ++ if (ret < 0) { ++ qemu_log("urma write failed, block offset: %lx, offset: %lx, size: %lx, ret: %d, errno: %d\n", ++ block_offset, offset, size, ret, errno); ++ return ret; ++ } ++ } ++ ++ return RAM_SAVE_CONTROL_DELAYED; ++} ++ ++static void qemu_urma_unimport(URMAContext *urma) ++{ ++ int i; ++ URMALocalBlocks *local_block = &urma->local_ram_blocks; ++ ++ for (i = 0; i < local_block->nb_blocks; i++) { ++ URMALocalBlock *block = &local_block->block[i]; ++ if (block->import_tseg) { ++ urma_unimport_seg_p(block->import_tseg); ++ block->import_tseg = NULL; ++ } ++ } ++ ++ if (urma->tjfr) { ++ urma_unimport_jfr_p(urma->tjfr); ++ urma->tjfr = NULL; ++ } ++ ++ qemu_log("unimport all blocks and jfr success.\n"); ++} ++ + int qemu_urma_import(URMAContext *urma) + { +- /* TODO */ ++ int i; ++ URMALocalBlocks *local_block = &urma->local_ram_blocks; ++ urma_import_seg_flag_t flag = { ++ .bs.cacheable = URMA_NON_CACHEABLE, ++ .bs.access = URMA_ACCESS_READ | URMA_ACCESS_WRITE | URMA_ACCESS_ATOMIC, ++ .bs.mapping = URMA_SEG_NOMAP, ++ .bs.reserved = 0 ++ }; ++ ++ for (i = 0; i < local_block->nb_blocks; i++) { ++ URMALocalBlock *block = &local_block->block[i]; ++ ++ block->import_tseg = urma_import_seg_p(urma->urma_ctx, &block->remote_seg, &block->remote_seg_token, 0, flag); ++ if (block->import_tseg == NULL) { ++ qemu_log("Failed to import segment, block name: %s, va: %p, size: %ld, errono: %d\n", ++ block->block_name, block->local_host_addr, block->length, errno); ++ goto err; ++ } ++ } ++ ++ urma->tjfr = qemu_import_jfr(urma); ++ if (urma->tjfr == NULL) { ++ qemu_log("Failed to import jfr, errno: %d\n", errno); ++ goto err; ++ } ++ ++ qemu_log("import all blocks and jfr success.\n"); ++ return 0; ++ ++err: ++ qemu_urma_unimport(urma); + return -EINVAL; + } + + int qemu_exchange_urma_info(QEMUFile *f, URMAContext *urma, bool server) + { +- /* TODO */ +- return -EINVAL; ++ int i; ++ URMALocalBlocks *local_block = &urma->local_ram_blocks; ++ seg_jfr_info_t local = {0}, remote = {0}; ++ MigrationState *s = migrate_get_current(); ++ int64_t start_time; ++ ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); ++ ++ qemu_log("start to exchange urma segment info.\n"); ++ ++ for (i = 0; i < local_block->nb_blocks; i++) { ++ URMALocalBlock *block = &local_block->block[i]; ++ ++ if (server) { ++ pack_seg_jfr_info(&local, urma, block); ++ qemu_put_buffer(f, (uint8_t *)&local, sizeof(seg_jfr_info_t)); ++ if (qemu_fflush(f) < 0) 
{ ++ qemu_log("Failed to flush qemu file, errno: %d\n", errno); ++ return -EINVAL; ++ } ++ } else { ++ if (qemu_get_buffer(f, (uint8_t *)&remote, sizeof(seg_jfr_info_t)) != sizeof(seg_jfr_info_t)) { ++ qemu_log("get urma info failed, block name: %s, errno: %d\n", block->block_name, errno); ++ return -EINVAL; ++ } ++ unpack_seg_jfr_info(&remote, urma, block); ++ } ++ } ++ ++ s->urma_exchange_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time; ++ return 0; + } + + void urma_start_outgoing_migration(void *opaque, + SocketAddress *saddr, + Error **errp) + { +- /* TODO */ ++ MigrationState *s = opaque; ++ URMAContext *urma = NULL; ++ int ret; ++ int64_t start_time; ++ ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); ++ ++ ret = urma_dlfunc_init(errp); ++ if (ret < 0) { ++ goto err; ++ } ++ ++ urma = qemu_urma_data_init(&saddr->u.inet); ++ if (urma == NULL) { ++ qemu_log("migration: qemu_urma_data_init failed\n"); ++ goto err; ++ } ++ ++ ret = qemu_urma_init_all(urma, true); ++ if (ret) { ++ qemu_log("migration: qemu_urma_init_all failed, ret: %d\n", ret); ++ goto err; ++ } ++ ++ s->urma_init_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time; ++ s->urma_migration = true; ++ s->urma_ctx = urma; ++ socket_start_outgoing_migration(s, saddr, errp); ++ ++ qemu_log("migration: start urma_start_outgoing_migration\n"); + return; ++ ++err: ++ error_setg(errp, "migration: urma start outgoing migration failed"); ++ qemu_urma_data_free(urma); + } + + void urma_start_incoming_migration(SocketAddress *saddr, + Error **errp) + { +- /* TODO */ ++ MigrationState *s = migrate_get_current(); ++ URMAContext *urma; ++ int ret; ++ ++ urma = qemu_urma_data_init(&saddr->u.inet); ++ if (urma == NULL) { ++ qemu_log("migration: qemu_urma_data_init failed\n"); ++ goto err; ++ } ++ ++ ret = urma_dlfunc_init(errp); ++ if (ret < 0) { ++ goto err; ++ } ++ ++ urma->is_incoming = true; ++ ++ ret = qemu_urma_init_all(urma, true); ++ if (ret) { ++ qemu_log("migration: qemu_urma_init_all failed, ret: %d\n", ret); ++ goto err; ++ } ++ ++ s->urma_migration = true; ++ s->urma_ctx = urma; ++ socket_start_incoming_migration(saddr, errp); ++ ++ qemu_log("migration: start urma_start_incoming_migration\n"); + return; ++ ++err: ++ error_setg(errp, "migration: urma start incoming migration failed"); ++ qemu_urma_data_free(urma); + } + + void urma_migration_cleanup(void) + { +- /* TODO */ +- return; ++ MigrationState *s = migrate_get_current(); ++ ++ if (s->urma_ctx == NULL) { ++ return; ++ } ++ ++ qemu_urma_unimport(s->urma_ctx); ++ qemu_urma_cleanup(s->urma_ctx); ++ qemu_urma_data_free(s->urma_ctx); ++ s->urma_ctx = NULL; ++ ++ qemu_log("urma migration cleanup success.\n"); + } + + int urma_control_save_page(QEMUFile *f, ram_addr_t block_offset, + ram_addr_t offset, size_t size) + { +- /* TODO */ +- return RAM_SAVE_CONTROL_NOT_SUPP; ++ int ret; ++ ++ ret = qemu_urma_save_page(f, block_offset, offset, size); ++ ++ if (ret != RAM_SAVE_CONTROL_DELAYED && ++ ret != RAM_SAVE_CONTROL_NOT_SUPP) { ++ if (ret < 0) { ++ qemu_file_set_error(f, ret); ++ } ++ } ++ return ret; + } + + void record_migration_log(MigrationState *s) +-- +2.33.0 + diff --git a/Migration-support-skip-GPU-pixman-image-I-O-during-H.patch b/Migration-support-skip-GPU-pixman-image-I-O-during-H.patch new file mode 100644 index 0000000000000000000000000000000000000000..3fb548372c4118e7c00bad6cda5790c0ab39b268 --- /dev/null +++ b/Migration-support-skip-GPU-pixman-image-I-O-during-H.patch @@ -0,0 +1,32 @@ +From e78fc70b18b628a5e709f0a5bb1be5829551df2c Mon Sep 
17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Mon, 24 Nov 2025 19:08:05 +0800 +Subject: [PATCH] Migration: support skip GPU pixman image I/O during HAM + migration + +Avoid GPU pixman image save/load during ham migration to reduce GPU device save and load time. + +Signed-off-by: lujun +--- + migration/migration.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/migration/migration.c b/migration/migration.c +index fcbf6f247c..19c6cf3768 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -3902,6 +3902,11 @@ bool need_fast_migrate(void) + return true; + } + #endif ++#ifdef CONFIG_HAM_MIGRATION ++ if (migrate_use_ldst()) { ++ return true; ++ } ++#endif + + return false; + } +-- +2.33.0 + diff --git a/Migration-support-skip-GPU-pixman-image-I-O-during-U.patch b/Migration-support-skip-GPU-pixman-image-I-O-during-U.patch new file mode 100644 index 0000000000000000000000000000000000000000..5f29e40ce4b3e308c052d34b70a242bf505f907e --- /dev/null +++ b/Migration-support-skip-GPU-pixman-image-I-O-during-U.patch @@ -0,0 +1,191 @@ +From 91d66b54418e201a8b55a75a645a1f17481d6faf Mon Sep 17 00:00:00 2001 +From: GQX <2290721782@qq.com> +Date: Mon, 24 Nov 2025 09:01:18 +0800 +Subject: [PATCH 4/5] Migration: support skip GPU pixman image I/O during URMA + migration +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +It also addresses a delay introduced by kernel commit 1e9a038b7f, which +added the srcu_get_delay function. This causes synchronize_srcu to incur +a few extra milliseconds of latency when the interval between two grace +periods is less than 25 µs. To avoid this, the code sleeps for 20 µs +here. + +Additionally, since the VM is paused immediately after VCPU stop, +downtime measurement starts at this point. 
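+
+For context on the skipped pixman image payload: the idea is that the
+resource's backing iovecs point at guest RAM, which the RAM migration
+path transfers anyway, so on the fast path the destination simply
+rebuilds the image from those pages after load (condensed from the
+virtio_gpu_load() hunk below):
+
+    if (need_fast_migrate()) {
+        iov_to_buf(res->iov, res->iov_cnt, 0,
+                   pixman_image_get_data(res->image),
+                   pixman_image_get_stride(res->image) *
+                   pixman_image_get_height(res->image));
+    }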
+ +Signed-off-by: GQX <2290721782@qq.com> +--- + hw/display/virtio-gpu.c | 22 ++++++++++++++++++---- + hw/virtio/virtio-pci.c | 22 ++++++++++++++++++++++ + migration/migration.c | 13 ++++++++++++- + migration/migration.h | 3 +++ + system/cpus.c | 2 ++ + 5 files changed, 57 insertions(+), 5 deletions(-) + +diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c +index a714638822..64313ece75 100644 +--- a/hw/display/virtio-gpu.c ++++ b/hw/display/virtio-gpu.c +@@ -31,6 +31,7 @@ + #include "qemu/module.h" + #include "qapi/error.h" + #include "qemu/error-report.h" ++#include "migration/migration.h" + + #define VIRTIO_GPU_VM_VERSION 1 + +@@ -1213,8 +1214,11 @@ static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, + qemu_put_be64(f, res->addrs[i]); + qemu_put_be32(f, res->iov[i].iov_len); + } +- qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), +- pixman_image_get_stride(res->image) * res->height); ++ ++ if (!need_fast_migrate()) { ++ qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), ++ pixman_image_get_stride(res->image) * res->height); ++ } + } + qemu_put_be32(f, 0); /* end of list */ + +@@ -1310,8 +1314,11 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, + res->addrs[i] = qemu_get_be64(f); + res->iov[i].iov_len = qemu_get_be32(f); + } +- qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), +- pixman_image_get_stride(res->image) * res->height); ++ ++ if (!need_fast_migrate()) { ++ qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), ++ pixman_image_get_stride(res->image) * res->height); ++ } + + if (!virtio_gpu_load_restore_mapping(g, res)) { + pixman_image_unref(res->image); +@@ -1320,6 +1327,13 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, + } + + resource_id = qemu_get_be32(f); ++ ++ if (need_fast_migrate()) { ++ iov_to_buf(res->iov, res->iov_cnt, 0, ++ pixman_image_get_data(res->image), ++ pixman_image_get_stride(res->image) * ++ pixman_image_get_height(res->image)); ++ } + } + + /* load & apply scanout state */ +diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c +index 13220c258d..1c80139383 100644 +--- a/hw/virtio/virtio-pci.c ++++ b/hw/virtio/virtio-pci.c +@@ -1066,6 +1066,8 @@ static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy) + return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX); + } + ++#define VIRTIO_DEVICE_REQUEST_INTERVAL 20 ++ + static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, + int queue_no) + { +@@ -1085,6 +1087,16 @@ static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, + } + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + kvm_virtio_pci_irqfd_release(proxy, n, vector); ++#ifdef __aarch64__ ++ /* kernel commit 1e9a038b7f introduce function srcu_get_delay,which ++ * result in synchronize_srcu cost a few more milliseconds if there are ++ * less than 25us between two grace periods, so we sleep 20us here ++ * to avoid the delay. 
++ * ++ * Note: synchronize community plan after community optimizes it ++ */ ++ usleep(VIRTIO_DEVICE_REQUEST_INTERVAL); ++#endif + } + kvm_virtio_pci_vq_vector_release(proxy, vector); + } +@@ -1239,6 +1251,16 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) + break; + } + if (index < proxy->nvqs_with_notifiers) { ++#ifdef __aarch64__ ++ /* kernel commit 1e9a038b7f introduce function srcu_get_delay,which ++ * result in synchronize_srcu cost a few more milliseconds if there are ++ * less than 25us between two grace periods, so we sleep 20us here ++ * to avoid the delay. ++ * ++ * Note: synchronize community plan after community optimizes it ++ */ ++ usleep(VIRTIO_DEVICE_REQUEST_INTERVAL); ++#endif + virtio_pci_one_vector_mask(proxy, index, vector, n); + } + vq = virtio_vector_next_queue(vq); +diff --git a/migration/migration.c b/migration/migration.c +index 10060cdb70..fcbf6f247c 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -109,7 +109,7 @@ static int migration_maybe_pause(MigrationState *s, + int new_state); + static bool close_return_path_on_source(MigrationState *s); + +-static void migration_downtime_start(MigrationState *s) ++void migration_downtime_start(MigrationState *s) + { + trace_vmstate_downtime_checkpoint("src-downtime-start"); + s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); +@@ -3894,3 +3894,14 @@ int ram_init_touched_log(void) + return -EINTR; + } + #endif ++ ++bool need_fast_migrate(void) ++{ ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ return true; ++ } ++#endif ++ ++ return false; ++} +diff --git a/migration/migration.h b/migration/migration.h +index 0fa6c377a8..3d7cb9caa7 100644 +--- a/migration/migration.h ++++ b/migration/migration.h +@@ -575,4 +575,7 @@ bool memcrypt_enabled(void); + int ram_init_touched_log(void); + #endif + ++bool need_fast_migrate(void); ++void migration_downtime_start(MigrationState *s); ++ + #endif +diff --git a/system/cpus.c b/system/cpus.c +index 9af5e22157..99be2a0c83 100644 +--- a/system/cpus.c ++++ b/system/cpus.c +@@ -46,6 +46,7 @@ + #include "hw/boards.h" + #include "hw/hw.h" + #include "trace.h" ++#include "migration/migration.h" + + #ifdef CONFIG_LINUX + +@@ -281,6 +282,7 @@ static int do_vm_stop(RunState state, bool send_stop) + runstate_set(state); + cpu_disable_ticks(); + pause_all_vcpus(); ++ migration_downtime_start(migrate_get_current()); + trace_all_vcpus_paused(); + + vm_state_notify(0, state); +-- +2.33.0 + diff --git a/Migration-support-the-basic-framework-of-URMA-migrat.patch b/Migration-support-the-basic-framework-of-URMA-migrat.patch new file mode 100644 index 0000000000000000000000000000000000000000..21f9cb83c0c59df30278988e2ccdc33158941c8d --- /dev/null +++ b/Migration-support-the-basic-framework-of-URMA-migrat.patch @@ -0,0 +1,847 @@ +From 1bd8d10ac72518e835aecae598105f46f8650d13 Mon Sep 17 00:00:00 2001 +From: GQX <2290721782@qq.com> +Date: Sun, 23 Nov 2025 22:53:33 +0800 +Subject: [PATCH 1/5] Migration: support the basic framework of URMA migration + +URMA migration is a feature that doing live migration via urma protocol +to fully utilize the high bandwidth brought by the UB architecture. +This feature can speed up the live migration, reduce CPU usage during +migration, and indirectly affect the migration downtime. + +We can use --enable-urma-migration/--disable-urma-migration during the +compile time to control whether to support the feature. Currently, the +feature is auto supported on aarch64 architecture. 
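+
+For example, a build with the feature enabled might be configured like
+this (--enable-urma-migration is the switch added by this patch; the
+target list is only illustrative):
+
+  ./configure --target-list=aarch64-softmmu --enable-urma-migration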
+ +This feature can be used during migration as this: + + (monitor) migrate urma://IP + +Signed-off-by: GQX <2290721782@qq.com> +--- + Kconfig.host | 3 + + meson.build | 9 ++ + meson_options.txt | 3 + + migration/meson.build | 4 + + migration/migration-stats.c | 5 +- + migration/migration-stats.h | 4 + + migration/migration.c | 58 +++++++++++++ + migration/migration.h | 14 ++++ + migration/options.c | 14 ++++ + migration/options.h | 3 + + migration/ram.c | 38 ++++++++- + migration/savevm.c | 5 ++ + migration/trace-events | 2 +- + migration/urma.c | 109 ++++++++++++++++++++++++ + migration/urma.h | 154 ++++++++++++++++++++++++++++++++++ + qapi/migration.json | 10 ++- + scripts/meson-buildoptions.sh | 5 ++ + util/qemu-sockets.c | 8 ++ + 18 files changed, 442 insertions(+), 6 deletions(-) + create mode 100644 migration/urma.c + create mode 100644 migration/urma.h + +diff --git a/Kconfig.host b/Kconfig.host +index f60ea6cef9..b03d5c6c0d 100644 +--- a/Kconfig.host ++++ b/Kconfig.host +@@ -57,5 +57,8 @@ config HV_BALLOON_POSSIBLE + config UB + bool + ++config URMA_MIGRATION ++ bool ++ + config HAM_MIGRATION + bool +\ No newline at end of file +diff --git a/meson.build b/meson.build +index 458d8981cc..60752d9d77 100644 +--- a/meson.build ++++ b/meson.build +@@ -595,6 +595,12 @@ else + have_ham_migration = false + endif + ++# urma migration ++have_urma_migration = get_option('urma_migration') \ ++ .require(targetos == 'linux', error_message: 'urma_migration is supported only on Linux') \ ++ .require(cpu == 'aarch64', error_message: 'urma_migration is supported only on aarch64') \ ++ .allowed() ++ + # vhost + have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(targetos != 'linux') \ +@@ -2304,6 +2310,7 @@ config_host_data.set('CONFIG_VHOST_USER', have_vhost_user) + config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto) + config_host_data.set('CONFIG_UB', have_ub) + config_host_data.set('CONFIG_HAM_MIGRATION', have_ham_migration) ++config_host_data.set('CONFIG_URMA_MIGRATION', have_urma_migration) + config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa) + config_host_data.set('CONFIG_VMNET', vmnet.found()) + config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server) +@@ -3018,6 +3025,7 @@ host_kconfig = \ + (x11.found() ? ['CONFIG_X11=y'] : []) + \ + (have_ub ? ['CONFIG_UB=y'] : []) + \ + (have_ham_migration ? ['CONFIG_HAM_MIGRATION=y'] : []) + \ ++ (have_urma_migration ? ['CONFIG_URMA_MIGRATION=y'] : []) + \ + (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ + (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ + (have_vhost_kernel ? 
['CONFIG_VHOST_KERNEL=y'] : []) + \ +@@ -4249,6 +4257,7 @@ summary_info += {'QOM debugging': get_option('qom_cast_debug')} + summary_info += {'Relocatable install': get_option('relocatable')} + summary_info += {'ub support': have_ub} + summary_info += {'ham migration support': have_ham_migration} ++summary_info += {'urma migration support': have_urma_migration} + summary_info += {'vhost-kernel support': have_vhost_kernel} + summary_info += {'vhost-net support': have_vhost_net} + summary_info += {'vhost-user support': have_vhost_user} +diff --git a/meson_options.txt b/meson_options.txt +index ea83306b8a..a2fb02d3a9 100644 +--- a/meson_options.txt ++++ b/meson_options.txt +@@ -294,6 +294,9 @@ option('sndio', type: 'feature', value: 'auto', + option('ub', type: 'feature', value: 'auto', + description: 'unify bus support') + ++option('urma_migration', type: 'feature', value: 'disabled', ++ description: 'live migration via urma protocol support') ++ + option('ham_migration', type: 'feature', value: 'auto', + description: 'live migration via memory semantics') + +diff --git a/migration/meson.build b/migration/meson.build +index e8934218c5..ec8cb1a7e9 100644 +--- a/migration/meson.build ++++ b/migration/meson.build +@@ -31,6 +31,10 @@ system_ss.add(files( + 'threadinfo.c', + ), gnutls) + ++if have_urma_migration ++ system_ss.add(files('urma.c')) ++endif ++ + if have_ham_migration + system_ss.add(files('ham.c')) + endif +diff --git a/migration/migration-stats.c b/migration/migration-stats.c +index f690b98a03..fa32f2ee7e 100644 +--- a/migration/migration-stats.c ++++ b/migration/migration-stats.c +@@ -64,7 +64,8 @@ uint64_t migration_transferred_bytes(void) + uint64_t multifd = stat64_get(&mig_stats.multifd_bytes); + uint64_t rdma = stat64_get(&mig_stats.rdma_bytes); + uint64_t qemu_file = stat64_get(&mig_stats.qemu_file_transferred); ++ uint64_t urma = stat64_get(&mig_stats.urma_bytes); + +- trace_migration_transferred_bytes(qemu_file, multifd, rdma); +- return qemu_file + multifd + rdma; ++ trace_migration_transferred_bytes(qemu_file, multifd, rdma, urma); ++ return qemu_file + multifd + rdma + urma; + } +diff --git a/migration/migration-stats.h b/migration/migration-stats.h +index 05290ade76..24911ce067 100644 +--- a/migration/migration-stats.h ++++ b/migration/migration-stats.h +@@ -97,6 +97,10 @@ typedef struct { + * Number of bytes sent through RDMA. + */ + Stat64 rdma_bytes; ++ /* ++ * Number of bytes sent through urma. ++ */ ++ Stat64 urma_bytes; + /* + * Number of pages transferred that were full of zeros. 
+ */ +diff --git a/migration/migration.c b/migration/migration.c +index 9e71a2566b..837e0471cb 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -282,6 +282,12 @@ void migration_incoming_state_destroy(void) + { + struct MigrationIncomingState *mis = migration_incoming_get_current(); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ urma_migration_cleanup(); ++ } ++#endif ++ + multifd_recv_cleanup(); + compress_threads_load_cleanup(); + /* +@@ -498,6 +504,17 @@ bool migrate_uri_parse(const char *uri, MigrationChannel **channel, + return false; + } + addr->transport = MIGRATION_ADDRESS_TYPE_RDMA; ++#ifdef CONFIG_URMA_MIGRATION ++ } else if (strstart(uri, "urma:", NULL) || strstart(uri, "hcom:", NULL)) { ++ SocketAddress *saddr = socket_parse(uri, errp); ++ if (!saddr) { ++ return false; ++ } ++ addr->u.socket.type = saddr->type; ++ addr->u.socket.u = saddr->u; ++ addr->transport = MIGRATION_ADDRESS_TYPE_URMA; ++ g_free(saddr); ++#endif + } else if (strstart(uri, "tcp:", NULL) || + strstart(uri, "unix:", NULL) || + strstart(uri, "vsock:", NULL) || +@@ -596,6 +613,10 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, + return; + } + rdma_start_incoming_migration(&addr->u.rdma, errp); ++#endif ++#ifdef CONFIG_URMA_MIGRATION ++ } else if (addr->transport == MIGRATION_ADDRESS_TYPE_URMA) { ++ urma_start_incoming_migration(&addr->u.socket, errp); + #endif + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { + exec_start_incoming_migration(addr->u.exec.args, errp); +@@ -688,6 +709,16 @@ process_incoming_migration_co(void *opaque) + migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_ACTIVE); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ if (qemu_exchange_urma_info(qemu_file_get_return_path(mis->from_src_file), ++ migrate_get_current()->urma_ctx, ++ true)) { ++ goto fail; ++ } ++ } ++#endif ++ + mis->loadvm_co = qemu_coroutine_self(); + ret = qemu_loadvm_state(mis->from_src_file); + mis->loadvm_co = NULL; +@@ -739,6 +770,12 @@ fail: + MIGRATION_STATUS_FAILED); + qemu_fclose(mis->from_src_file); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ urma_migration_cleanup(); ++ } ++#endif ++ + multifd_recv_cleanup(); + compress_threads_load_cleanup(); + +@@ -1313,6 +1350,12 @@ static void migrate_fd_cleanup(MigrationState *s) + + qemu_savevm_state_cleanup(); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ urma_migration_cleanup(); ++ } ++#endif ++ + if (s->to_dst_file) { + QEMUFile *tmp; + +@@ -1623,6 +1666,7 @@ int migrate_init(MigrationState *s, Error **errp) + s->threshold_size = 0; + s->switchover_acked = false; + s->rdma_migration = false; ++ s->urma_migration = false; + s->iteration_num = 0; + /* + * set mig_stats memory to zero for a new migration +@@ -2044,6 +2088,10 @@ void qmp_migrate(const char *uri, bool has_channels, + #ifdef CONFIG_RDMA + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { + rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err); ++#endif ++#ifdef CONFIG_URMA_MIGRATION ++ } else if (addr->transport == MIGRATION_ADDRESS_TYPE_URMA) { ++ urma_start_outgoing_migration(s, &addr->u.socket, &local_err); + #endif + } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { + exec_start_outgoing_migration(s, addr->u.exec.args, &local_err); +@@ -3409,6 +3457,13 @@ static void *migration_thread(void *opaque) + qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, + MIGRATION_STATUS_ACTIVE); + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) 
{ ++ qemu_exchange_urma_info(qemu_file_get_return_path(s->to_dst_file), s->urma_ctx, false); ++ qemu_urma_import(s->urma_ctx); ++ } ++#endif ++ + s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; + + trace_migration_thread_setup_complete(); +@@ -3449,6 +3504,9 @@ out: + object_unref(OBJECT(s)); + rcu_unregister_thread(); + migration_threads_remove(thread); ++#ifdef CONFIG_URMA_MIGRATION ++ record_migration_log(s); ++#endif + #ifdef CONFIG_HAM_MIGRATION + ham_migrate_cleanup(); + #endif +diff --git a/migration/migration.h b/migration/migration.h +index 46f0c37fec..0fa6c377a8 100644 +--- a/migration/migration.h ++++ b/migration/migration.h +@@ -26,6 +26,9 @@ + #include "qom/object.h" + #include "postcopy-ram.h" + #include "sysemu/runstate.h" ++#ifdef CONFIG_URMA_MIGRATION ++#include "urma.h" ++#endif + + struct PostcopyBlocktimeContext; + +@@ -470,6 +473,17 @@ struct MigrationState { + bool switchover_acked; + /* Is this a rdma migration */ + bool rdma_migration; ++ ++ /* Is this a urma migration */ ++ bool urma_migration; ++#ifdef CONFIG_URMA_MIGRATION ++ URMAContext *urma_ctx; ++#endif ++ int64_t urma_init_time; ++ int64_t urma_exchange_time; ++ int64_t last_memcpy_time; ++ int64_t ram_reg_time; ++ int64_t dev_mig_time; + /* Number of migration iterations */ + uint64_t iteration_num; + }; +diff --git a/migration/options.c b/migration/options.c +index 01c9a93adb..c2695aee65 100644 +--- a/migration/options.c ++++ b/migration/options.c +@@ -336,6 +336,15 @@ bool migrate_postcopy_ram(void) + return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; + } + ++#ifdef CONFIG_URMA_MIGRATION ++bool migrate_urma(void) ++{ ++ MigrationState *s = migrate_get_current(); ++ ++ return s->urma_migration; ++} ++#endif ++ + bool migrate_use_ldst(void) + { + MigrationState *s = migrate_get_current(); +@@ -360,6 +369,11 @@ bool migrate_release_ram(void) + bool migrate_return_path(void) + { + MigrationState *s = migrate_get_current(); ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ return false; ++ } ++#endif + + return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; + } +diff --git a/migration/options.h b/migration/options.h +index 92cc79cb26..78f4af0ac8 100644 +--- a/migration/options.h ++++ b/migration/options.h +@@ -38,6 +38,9 @@ bool migrate_pause_before_switchover(void); + bool migrate_postcopy_blocktime(void); + bool migrate_postcopy_preempt(void); + bool migrate_postcopy_ram(void); ++#ifdef CONFIG_URMA_MIGRATION ++bool migrate_urma(void); ++#endif + bool migrate_rdma_pin_all(void); + bool migrate_release_ram(void); + bool migrate_return_path(void); +diff --git a/migration/ram.c b/migration/ram.c +index f8623153cf..ace13801d1 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -1216,8 +1216,19 @@ static bool control_save_page(PageSearchStatus *pss, + { + int ret; + ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ ret = urma_control_save_page(pss->pss_channel, pss->block->offset, offset, ++ TARGET_PAGE_SIZE); ++ } else { ++ ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset, ++ TARGET_PAGE_SIZE); ++ } ++#else + ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset, + TARGET_PAGE_SIZE); ++#endif ++ + if (ret == RAM_SAVE_CONTROL_NOT_SUPP) { + return false; + } +@@ -3642,6 +3653,19 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) + qemu_file_set_error(f, ret); + } + ++#ifdef CONFIG_URMA_MIGRATION ++ /* ++ * During urma migration, we need wait all data write done ++ * to obtain the actual bandwidth. 
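++ * Otherwise the transferred-byte counters would still include writes ++ * that are in flight, and the bandwidth estimate used for the switchover ++ * decision would be too optimistic.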
++ */ ++ if (migrate_urma()) { ++ ret = qemu_flush_urma_write(migrate_get_current()->urma_ctx); ++ if (ret < 0) { ++ qemu_file_set_error(f, ret); ++ } ++ } ++#endif ++ + out: + if (ret >= 0 + && migration_is_setup_or_active(migrate_get_current()->state)) { +@@ -3678,6 +3702,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + RAMState **temp = opaque; + RAMState *rs = *temp; + int ret = 0; ++ MigrationState *s = migrate_get_current(); ++ int64_t start_time = 0; + + rs->last_stage = !migration_in_colo_state(); + +@@ -3694,6 +3720,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + + /* try transferring iterative blocks of memory */ + ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + /* flush all remaining blocks regardless of rate limiting */ + qemu_mutex_lock(&rs->bitmap_mutex); + while (true) { +@@ -3751,7 +3778,16 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + } + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); +- return qemu_fflush(f); ++ ret = qemu_fflush(f); ++ ++#ifdef CONFIG_URMA_MIGRATION ++ if (migrate_urma()) { ++ ret |= qemu_flush_urma_write(s->urma_ctx); ++ } ++#endif ++ ++ s->last_memcpy_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time; ++ return ret; + } + + static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, +diff --git a/migration/savevm.c b/migration/savevm.c +index be3bfc1078..4b847060d1 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -1545,6 +1545,9 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, + int vmdesc_len; + SaveStateEntry *se; + int ret; ++ int64_t start_time; ++ ++ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if (se->vmsd && se->vmsd->early_setup) { +@@ -1602,6 +1605,8 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, + + trace_vmstate_downtime_checkpoint("src-non-iterable-saved"); + ++ ms->dev_mig_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time; ++ + return 0; + } + +diff --git a/migration/trace-events b/migration/trace-events +index f0e1cb80c7..cd041bc308 100644 +--- a/migration/trace-events ++++ b/migration/trace-events +@@ -193,7 +193,7 @@ process_incoming_migration_co_postcopy_end_main(void) "" + postcopy_preempt_enabled(bool value) "%d" + + # migration-stats +-migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64 ++migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma, uint64_t urma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64 " URMA %" PRIu64 + + # channel.c + migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s" +diff --git a/migration/urma.c b/migration/urma.c +new file mode 100644 +index 0000000000..8fb85e4123 +--- /dev/null ++++ b/migration/urma.c +@@ -0,0 +1,109 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2025. All rights reserved. ++ * ++ * Description: Support vm migration using the protocol and interfaces provided by the URMA component. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ * ++ */ ++ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++ ++#include "urma.h" ++#include "migration.h" ++#include "multifd.h" ++#include "migration-stats.h" ++#include "qemu-file.h" ++#include "ram.h" ++#include "rdma.h" ++#include "qemu/error-report.h" ++#include "qemu/main-loop.h" ++#include "qemu/module.h" ++#include "qemu/rcu.h" ++#include "qemu/sockets.h" ++#include "qemu/bitmap.h" ++#include "qemu/coroutine.h" ++#include "exec/memory.h" ++#include ++#include ++#include ++#include "trace.h" ++#include "qom/object.h" ++#include "options.h" ++#include ++#include "qemu/log.h" ++#include ++#include "socket.h" ++#include "exec/target_page.h" ++#include ++#include "crypto/random.h" ++ ++int qemu_flush_urma_write(URMAContext *urma) ++{ ++ /* TODO */ ++ return -EINVAL; ++} ++ ++int qemu_urma_import(URMAContext *urma) ++{ ++ /* TODO */ ++ return -EINVAL; ++} ++ ++int qemu_exchange_urma_info(QEMUFile *f, URMAContext *urma, bool server) ++{ ++ /* TODO */ ++ return -EINVAL; ++} ++ ++void urma_start_outgoing_migration(void *opaque, ++ SocketAddress *saddr, ++ Error **errp) ++{ ++ /* TODO */ ++ return; ++} ++ ++void urma_start_incoming_migration(SocketAddress *saddr, ++ Error **errp) ++{ ++ /* TODO */ ++ return; ++} ++ ++void urma_migration_cleanup(void) ++{ ++ /* TODO */ ++ return; ++} ++ ++int urma_control_save_page(QEMUFile *f, ram_addr_t block_offset, ++ ram_addr_t offset, size_t size) ++{ ++ /* TODO */ ++ return RAM_SAVE_CONTROL_NOT_SUPP; ++} ++ ++void record_migration_log(MigrationState *s) ++{ ++ qemu_log("qmp urma resource initialization and connection cost time: %ld(ms)\n", s->urma_init_time); ++ qemu_log("qmp urma exchange info cost time: %ld(ms)\n", s->urma_exchange_time); ++ qemu_log("qmp ram registration cost time: %ld(ms)\n", s->ram_reg_time); ++ qemu_log("qmp device migration cost time: %ld(ms)\n", s->dev_mig_time); ++ qemu_log("qmp last memcpy cost time: %ld(ms)\n", s->last_memcpy_time); ++ qemu_log("qmp downtime %ld(ms)\n", s->downtime); ++ qemu_log("qmp setup time %ld(ms)\n", s->setup_time); ++ qemu_log("qmp total time %ld(ms)\n", s->total_time); ++} +diff --git a/migration/urma.h b/migration/urma.h +new file mode 100644 +index 0000000000..62cd8cb489 +--- /dev/null ++++ b/migration/urma.h +@@ -0,0 +1,154 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. ++ * ++ * Description: Support vm migration using the protocol and interfaces provided by the URMA component. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ * ++ */ ++ ++#ifndef QEMU_MIGRATION_URMA_H ++#define QEMU_MIGRATION_URMA_H ++ ++#include "qemu/sockets.h" ++#include "exec/memory.h" ++ ++#include ++#include ++ ++#define URMA_SO_PATH "liburma.so.0" ++#define URMA_TOKEN_LEN 32 ++ ++typedef struct QEMU_PACKED URMADestBlock { ++ uint64_t remote_host_addr; ++ uint64_t offset; ++ uint64_t length; ++ uint32_t remote_rkey; ++ uint32_t padding; ++} URMADestBlock; ++ ++typedef struct URMALocalBlock { ++ char *block_name; ++ uint8_t *local_host_addr; /* local virtual address */ ++ uint64_t remote_host_addr; /* remote virtual address */ ++ uint64_t offset; ++ uint64_t length; ++ int index; /* which block are we */ ++ unsigned int src_index; /* (Only used on dest) */ ++ bool is_ram_block; ++ int nb_chunks; ++ unsigned long *transit_bitmap; ++ unsigned long *unregister_bitmap; ++ ++ urma_target_seg_t *local_tseg; /* tseg for non-chunk-level registration */ ++ urma_token_t local_seg_token; ++ urma_seg_t remote_seg; /* remote seg for non-chunk-level registration */ ++ urma_token_t remote_seg_token; ++ urma_target_seg_t *import_tseg; /* Imported target segment for read/write/atomic */ ++} URMALocalBlock; ++ ++typedef struct URMALocalBlocks { ++ int nb_blocks; ++ bool init; /* main memory init complete */ ++ URMALocalBlock *block; ++} URMALocalBlocks; ++ ++ ++typedef struct URMAContext { ++ char *host; ++ int port; ++ int id; ++ ++ int is_incoming; ++ ++ /* number of outstanding writes */ ++ int nb_sent; ++ ++ /* number of polling writes */ ++ int nb_polling; ++ ++ /* store info about current buffer so that we can ++ merge it with future sends */ ++ uint64_t current_addr; ++ uint64_t current_length; ++ /* index of ram block the current buffer belongs to */ ++ int current_index; ++ /* index of the chunk in the current ram block */ ++ int current_chunk; ++ ++ bool pin_all; ++ ++ GHashTable *blockmap; ++ ++ /* ++ * Description of ram blocks used throughout the code. 
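++ * This mirrors the RDMA transport's RDMALocalBlocks bookkeeping (one entry ++ * per RAMBlock), with urma segments and tokens taking the place of ibv ++ * memory regions.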
++ */ ++ URMALocalBlocks local_ram_blocks; ++ ++ ++ URMADestBlock *dest_blocks; ++ ++ /* urma info */ ++ urma_context_t *urma_ctx; ++ urma_device_attr_t dev_attr; ++ ++ urma_jfce_t *jfce; ++ urma_jfc_t *jfc; ++ urma_jfs_t *jfs; ++ urma_jfr_t *jfr; ++ uint64_t rid; ++ urma_token_t jfr_token; ++ bool event_mode; ++ int max_jfs_depth; ++ ++ int client_sockfd; ++ int listen_fd; ++ ++ urma_jfr_id_t remote_jfr_id; ++ urma_token_t rjfr_token; ++ urma_target_jetty_t *tjfr; ++} URMAContext; ++ ++typedef struct seg_jfr_info_t { ++ /* Common */ ++ urma_eid_t eid; ++ uint32_t uasid; ++ /* segment */ ++ uint64_t seg_va; ++ uint64_t seg_len; ++ uint32_t seg_flag; ++ uint32_t seg_token_id; ++ urma_token_t seg_token; ++ /* jfr */ ++ urma_jfr_id_t jfr_id; ++ urma_token_t jfr_token; ++ ++ /* bond info */ ++ urma_bond_seg_info_out_t seg_bond_info; ++ urma_bond_id_info_out_t jfr_bond_info; ++} __attribute__((packed)) seg_jfr_info_t; ++ ++ ++void urma_start_outgoing_migration(void *opaque, SocketAddress *saddr, ++ Error **errp); ++void urma_start_incoming_migration(SocketAddress *saddr, Error **errp); ++int urma_control_save_page(QEMUFile *f, ram_addr_t block_offset, ++ ram_addr_t offset, size_t size); ++int qemu_flush_urma_write(URMAContext *urma); ++int qemu_exchange_urma_info(QEMUFile *f, URMAContext *urma, bool server); ++int qemu_urma_import(URMAContext *urma); ++void urma_migration_cleanup(void); ++void record_migration_log(MigrationState *s); ++ ++#endif +diff --git a/qapi/migration.json b/qapi/migration.json +index a22e6df695..12d9040620 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -1766,12 +1766,16 @@ + # + # @rdma: Migrate via RDMA. + # ++# @urma: Migrate via URMA. ++# ++# @hcom: Migrate via URMA. ++# + # @file: Direct the migration stream to a file. 
+ # + # Since 8.2 + ## + { 'enum': 'MigrationAddressType', +- 'data': [ 'socket', 'exec', 'rdma', 'file' ] } ++ 'data': [ 'socket', 'exec', 'rdma', 'file', 'urma', 'hcom' ] } + + ## + # @FileMigrationArgs: +@@ -1810,7 +1814,9 @@ + 'socket': 'SocketAddress', + 'exec': 'MigrationExecCommand', + 'rdma': 'InetSocketAddress', +- 'file': 'FileMigrationArgs' } } ++ 'file': 'FileMigrationArgs', ++ 'hcom': 'SocketAddress', ++ 'urma': 'SocketAddress' } } + + ## + # @MigrationChannelType: +diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh +index 3b5a146afd..fd3eecbb5e 100644 +--- a/scripts/meson-buildoptions.sh ++++ b/scripts/meson-buildoptions.sh +@@ -54,6 +54,8 @@ meson_options_help() { + printf "%s\n" ' --enable-tsan enable thread sanitizer' + printf "%s\n" ' --enable-ub enable unify bus' + printf "%s\n" ' --disable-ub disable unify bus' ++ printf "%s\n" ' --enable-urma-migration enable urma migration' ++ printf "%s\n" ' --disable-urma-migration disable urma migration' + printf "%s\n" ' --enable-ham-migration enable ham migration' + printf "%s\n" ' --disable-ham-migration disable ham migration' + printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' +@@ -195,6 +197,7 @@ meson_options_help() { + printf "%s\n" ' u2f U2F emulation support' + printf "%s\n" ' ub unify bus support' + printf "%s\n" ' ham-migration live migration via memory semantics' ++ printf "%s\n" ' urma-migration live migration via urma protocol support' + printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' vde vde network backend support' + printf "%s\n" ' vdi vdi image format support' +@@ -523,6 +526,8 @@ _meson_option_parse() { + --disable-ub) printf "%s" -Dub=disabled ;; + --enable-ham-migration) printf "%s" -Dham_migration=enabled ;; + --disable-ham-migration) printf "%s" -Dham_migration=disabled ;; ++ --enable-urma-migration) printf "%s" -Durma_migration=enabled ;; ++ --disable-urma-migration) printf "%s" -Durma_migration=disabled ;; + --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; + --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-vde) printf "%s" -Dvde=enabled ;; +diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c +index 83e84b1186..3ed8997753 100644 +--- a/util/qemu-sockets.c ++++ b/util/qemu-sockets.c +@@ -1122,12 +1122,20 @@ SocketAddress *socket_parse(const char *str, Error **errp) + if (inet_parse(&addr->u.inet, str + strlen("tcp:"), errp)) { + goto fail; + } ++#ifdef CONFIG_URMA_MIGRATION ++ } else if (strstart(str, "urma:", NULL) || strstart(str, "hcom:", NULL)) { ++ addr->type = SOCKET_ADDRESS_TYPE_INET; ++ if (inet_parse(&addr->u.inet, str + strlen("urma:"), errp)) { ++ goto fail; ++ } ++#endif + } else { + addr->type = SOCKET_ADDRESS_TYPE_INET; + if (inet_parse(&addr->u.inet, str, errp)) { + goto fail; + } + } ++ + return addr; + + fail: +-- +2.33.0 + diff --git a/backends-Add-support-of-one-guest-numa-node-alloc-me2.patch b/backends-Add-support-of-one-guest-numa-node-alloc-me2.patch new file mode 100644 index 0000000000000000000000000000000000000000..f4eecdcf0acc751b6b9daf608f3b4de9cc6e9a29 --- /dev/null +++ b/backends-Add-support-of-one-guest-numa-node-alloc-me2.patch @@ -0,0 +1,37 @@ +From feb293a058f98e45962097e254813b3fcccfa741 Mon Sep 17 00:00:00 2001 +From: leizongkun +Date: Thu, 20 Nov 2025 21:33:24 +0800 +Subject: [PATCH] backends: Add support of one guest numa node alloc memory + from multi host nodes + +Provide QEMU with a more flexible memory NUMA binding approach, +allowing a guest 
NUMA node to allocate memory +to different host NUMA nodes according to specified proportions. + +Signed-off-by: leizongkun +--- + backends/hostmem.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/backends/hostmem.c b/backends/hostmem.c +index 9c5162760f..9d251cc0b7 100644 +--- a/backends/hostmem.c ++++ b/backends/hostmem.c +@@ -657,6 +657,14 @@ host_memory_backend_class_init(ObjectClass *oc, void *data) + NULL, NULL); + object_class_property_set_description(oc, "host-nodes", + "Binds memory to the list of NUMA host nodes"); ++#ifdef CONFIG_MBIND_PROPORTION ++ object_class_property_add(oc, "host-nodes-propertion", "str", ++ NULL, ++ host_memory_backend_set_propertion, ++ NULL, NULL); ++ object_class_property_set_description(oc, "host-nodes-propertion", ++ "Mark the memory bind to host node by propertion"); ++#endif + object_class_property_add_enum(oc, "policy", "HostMemPolicy", + &HostMemPolicy_lookup, + host_memory_backend_get_policy, +-- +2.33.0 + diff --git a/backends-fix-memory-leak-in-the-function-host_memory.patch b/backends-fix-memory-leak-in-the-function-host_memory.patch new file mode 100644 index 0000000000000000000000000000000000000000..a2313906b0a9b3e69a8e84d53a459cb7ad2eaeae --- /dev/null +++ b/backends-fix-memory-leak-in-the-function-host_memory.patch @@ -0,0 +1,40 @@ +From 67a32461251c8d42e05573758d0b8c9c55bc24ec Mon Sep 17 00:00:00 2001 +From: leizongkun +Date: Tue, 25 Nov 2025 11:18:37 +0800 +Subject: [PATCH] backends: fix memory leak in the function + host_memory_backend_memory_complete + +In the function of host_memory_backend_memory_complete, one mbind_by_proportions +failed, backend->propertion should be free + +Signed-off-by: wangzhigang +Signed-off-by: zhangliang +Signed-off-by: leizongkun +--- + backends/hostmem.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/backends/hostmem.c b/backends/hostmem.c +index 9d251cc0b7..925ce3cd7e 100644 +--- a/backends/hostmem.c ++++ b/backends/hostmem.c +@@ -393,7 +393,7 @@ static int mbind_by_proportions(void *ptr, const char *bind_proportions, uint64_ + long size_token; + DECLARE_BITMAP(tmp_host_nodes, MAX_NODES + 1) = {0}; + +- ptr = (void*)((char*)ptr + size); ++ ptr = (void*)((char *)ptr + size); + if (memcpy(prop, proportions[i], strlen(proportions[i]) + 1) == NULL) { + qemu_log("failed to copy propertion"); + return -1; +@@ -474,6 +474,7 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) + if (proportion != NULL) { + if (mbind_by_proportions(ptr, proportion, sz) < 0) { + error_setg(errp, "failed to mbind_by_proportions"); ++ free(backend->propertion); + return; + } + free(backend->propertion); +-- +2.33.0 + diff --git a/migration-CONFIG_HAM_MIGRATION-is-associated-with-th.patch b/migration-CONFIG_HAM_MIGRATION-is-associated-with-th.patch new file mode 100644 index 0000000000000000000000000000000000000000..bd4439b6eb613935e119691a49854cc4ffb2dab7 --- /dev/null +++ b/migration-CONFIG_HAM_MIGRATION-is-associated-with-th.patch @@ -0,0 +1,128 @@ +From a4577c03383902ef17c50e8e767d2d8a565084fc Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Thu, 20 Nov 2025 11:30:34 +0800 +Subject: [PATCH 6/6] migration: CONFIG_HAM_MIGRATION is associated with the + aarch64 architecture. 
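+ +The ham_migration meson option is now evaluated only when the host CPU is +aarch64 and is forced off on other architectures; the HAM-specific QMP +commands and structs in qapi/migration.json are made conditional on +CONFIG_HAM_MIGRATION so that they are compiled out when the feature is +disabled.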
+ +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + meson.build | 12 ++++++++---- + migration/meson.build | 2 +- + qapi/migration.json | 21 ++++++++++++++------- + 3 files changed, 23 insertions(+), 12 deletions(-) + +diff --git a/meson.build b/meson.build +index 1a09fefc6b..b03869810f 100644 +--- a/meson.build ++++ b/meson.build +@@ -585,10 +585,14 @@ have_ub = get_option('ub') \ + .require(targetos == 'linux', error_message: 'UB is supported only on Linux') \ + .allowed() + +-# ham migration +-have_ham_migration = get_option('ham_migration') \ +- .require(targetos == 'linux', error_message: 'ham_migration is supported only on Linux') \ +- .allowed() ++if cpu in ['aarch64'] ++ # ham migration ++ have_ham_migration = get_option('ham_migration') \ ++ .require(targetos == 'linux', error_message: 'ham_migration is supported only on Linux') \ ++ .allowed() ++else ++ have_ham_migration = false ++endif + + # vhost + have_vhost_user = get_option('vhost_user') \ +diff --git a/migration/meson.build b/migration/meson.build +index 22e92bcedc..e8934218c5 100644 +--- a/migration/meson.build ++++ b/migration/meson.build +@@ -31,7 +31,7 @@ system_ss.add(files( + 'threadinfo.c', + ), gnutls) + +-if get_option('ham_migration').allowed() ++if have_ham_migration + system_ss.add(files('ham.c')) + endif + +diff --git a/qapi/migration.json b/qapi/migration.json +index c836bb3f85..a22e6df695 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -2771,7 +2771,8 @@ + { 'struct': 'RamInfo', + 'data': { 'uuid': 'uint32', + 'hva': 'uint64', +-'size': 'size' } } ++'size': 'size' }, ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @VmInfo: +@@ -2786,7 +2787,8 @@ + ## + { 'struct': 'VmInfo', + 'data': { 'pid': 'int', +-'block': ['RamInfo'] } } ++'block': ['RamInfo'] }, ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @query-ramblock: +@@ -2801,7 +2803,8 @@ + # + # Since: 8.2 + ## +-{ 'command': 'query-ramblock', 'returns': 'VmInfo' } ++{ 'command': 'query-ramblock', 'returns': 'VmInfo', ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @NumaInfo: +@@ -2816,7 +2819,8 @@ + ## + { 'struct': 'NumaInfo', + 'data': { 'numa-id': 'uint32', +-'size': 'size' } } ++'size': 'size' }, ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @recv-rmtnuma: +@@ -2839,7 +2843,8 @@ + { 'command': 'recv-rmtnuma', + 'data': { 'pid': 'int', + 'scna': 'uint16', +-'block': ['NumaInfo'] } } ++'block': ['NumaInfo'] }, ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @rollback-pages: +@@ -2852,7 +2857,8 @@ + # + # Since: 8.2 + ## +-{ 'command': 'rollback-pages' } ++{ 'command': 'rollback-pages', ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } + + ## + # @modify-pgtable: +@@ -2865,4 +2871,5 @@ + # + # Since: 8.2 + ## +-{ 'command': 'modify-pgtable' } +\ No newline at end of file ++{ 'command': 'modify-pgtable', ++'if': { 'any': ['CONFIG_HAM_MIGRATION'] } } +\ No newline at end of file +-- +2.33.0 + diff --git a/migration-ham-HAM-migration-memory-cancellation-dirt.patch b/migration-ham-HAM-migration-memory-cancellation-dirt.patch new file mode 100644 index 0000000000000000000000000000000000000000..b557b1363ce1791048ae7b8ddae1ca3af7400130 --- /dev/null +++ b/migration-ham-HAM-migration-memory-cancellation-dirt.patch @@ -0,0 +1,156 @@ +From 0b14d0e594adc2c8c8075acef421cff456864aa5 Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Wed, 19 Nov 2025 20:28:08 +0800 +Subject: [PATCH 5/6] migration/ham: HAM migration memory cancellation dirty + page 
synchronization + +HAM migration does not require page dirtying; to improve performance and reduce downtime, related logic is skipped. + +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + accel/kvm/kvm-all.c | 9 ++++++++- + migration/ham.c | 11 +++++++++++ + migration/ham.h | 2 ++ + migration/ram.c | 40 ++++++++++++++++++++++++++++++++++++++-- + 4 files changed, 59 insertions(+), 3 deletions(-) + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index 8fb3f2eee7..94634a1804 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -53,7 +53,9 @@ + #include "sysemu/stats.h" + + #include "sysemu/kvm.h" +- ++#ifdef CONFIG_HAM_MIGRATION ++#include "migration/ham.h" ++#endif + /* This check must be after config-host.h is included */ + #ifdef CONFIG_EVENTFD + #include +@@ -897,6 +899,11 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, + /* We don't have a slot if we want to trap every access. */ + return; + } ++#ifdef CONFIG_HAM_MIGRATION ++ if (ham_should_skip_dirty_log(mem)) { ++ return; ++ } ++#endif + if (kvm_slot_get_dirty_log(s, mem)) { + kvm_slot_sync_dirty_pages(mem); + } +diff --git a/migration/ham.c b/migration/ham.c +index 3e90091834..b2c66ce098 100644 +--- a/migration/ham.c ++++ b/migration/ham.c +@@ -281,6 +281,17 @@ bool ham_should_complete_migration(MigrationState *s) + return migrate_use_ldst() && s->iteration_num >= 1; + } + ++bool ham_should_skip_dirty_log(KVMSlot *mem) ++{ ++ /* ++ * In QEMU's memory layout, VIRT_MEM ususally starts at 1 GiB. ++ * Since the HAM mode doesn't require dirty marking, this ++ * function avoids unnecessarily tracking dirty pages in ++ * memory regions, thereby improving performance. ++ */ ++ return migrate_use_ldst() && mem->start_addr >= GiB; ++} ++ + static void free_ram_info_list(RamInfoList *list) + { + RamInfoList *tmp; +diff --git a/migration/ham.h b/migration/ham.h +index 5a815a61f6..e732607706 100644 +--- a/migration/ham.h ++++ b/migration/ham.h +@@ -78,6 +78,8 @@ void ham_madvise_page(void); + + bool ham_should_complete_migration(MigrationState *s); + ++bool ham_should_skip_dirty_log(KVMSlot *mem); ++ + #ifdef __cplusplus + } + #endif +diff --git a/migration/ram.c b/migration/ram.c +index 609c85f7a7..f8623153cf 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -1060,6 +1060,24 @@ static void migration_trigger_throttle(RAMState *rs) + } + } + ++#ifdef CONFIG_HAM_MIGRATION ++static bool ham_sync_vm_ram_bitmap(RAMState *rs, RAMBlock *block) ++{ ++ /* ++ * In HAM migration mode, QEMU dirty page synchronization is not ++ * performed for memory blocks; instead, a page is marked as dirty ++ * and the process proceeds directly to the HAM migration stage. 
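++ * Flagging the first BITS_PER_LONG pages is enough for find_dirty_block() ++ * to select the block; ram_save_host_page() then hands the whole block to ++ * the HAM interface in one go.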
++ */ ++ if (!migrate_use_ldst() || !ham_is_vm_ram(block->page_size)) { ++ return false; ++ } ++ unsigned long *bmap = block->bmap; ++ memset(bmap, 0, sizeof(unsigned long)); ++ bmap[0] = ~0UL; ++ return true; ++} ++#endif ++ + static void migration_bitmap_sync(RAMState *rs, bool last_stage) + { + RAMBlock *block; +@@ -1077,6 +1095,11 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage) + qemu_mutex_lock(&rs->bitmap_mutex); + WITH_RCU_READ_LOCK_GUARD() { + RAMBLOCK_FOREACH_NOT_IGNORED(block) { ++#ifdef CONFIG_HAM_MIGRATION ++ if (ham_sync_vm_ram_bitmap(rs, block)) { ++ continue; ++ } ++#endif + ramblock_sync_dirty_bitmap(rs, block); + } + stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining()); +@@ -1468,7 +1491,12 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) + { + /* Update pss->page for the next dirty bit in ramblock */ + pss_find_next_dirty(pss); +- ++#ifdef CONFIG_HAM_MIGRATION ++ /* In HAM migration mode, only one round of dirty page traversal is needed */ ++ if (migrate_use_ldst() && pss->complete_round) { ++ return PAGE_ALL_CLEAN; ++ } ++#endif + if (pss->complete_round && pss->block == rs->last_seen_block && + pss->page >= rs->last_page) { + /* +@@ -3577,7 +3605,15 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) + if (migrate_postcopy_ram()) { + compress_flush_data(); + } +- ++#ifdef CONFIG_HAM_MIGRATION ++ /* ++ * In HAM migration mode, there is no need to check for timeouts; ++ * the process can be completed in one iteration. ++ */ ++ if (migrate_use_ldst()) { ++ continue; ++ } ++#endif + /* + * we want to check in the 1st loop, just in case it was the 1st + * time and we had to sync the dirty bitmap. +-- +2.33.0 + diff --git a/migration-ham-a-new-approach-to-vm-live-migration-de.patch b/migration-ham-a-new-approach-to-vm-live-migration-de.patch new file mode 100644 index 0000000000000000000000000000000000000000..cc85c11a910fd947685347a4c9dbf59e604956a1 --- /dev/null +++ b/migration-ham-a-new-approach-to-vm-live-migration-de.patch @@ -0,0 +1,689 @@ +From 0326fac9c49bbe93da71127485b8cedc735c5257 Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Wed, 19 Nov 2025 19:50:37 +0800 +Subject: [PATCH 2/6] migration/ham: a new approach to vm live migration: + deterministic migration + +HAM (High-Availability Migration) is a migration component that enables the migration of VMs under high load. + +Key features: +- Uses the neighbor-node feature to lend the destination virtual machine's memory to the source end. +- Uses the kernel's 'migrate_pages' interface for efficient page migration. +- Requires no dirty-page tracking, achieving deterministic migration. + +Interfaces (for detailed information, please visit https://gitee.com/openeuler/ham/blob/master/doc/Developer_Guide.md): + /** + * Migration preprocessing. + * Mainly configures huge tables, establishes mapping relationships between the src and dst pages + * and allocates new folios for migration. + */ + int32_t ubturbo_ham_register(HamRamInfo *src, HamNumaInfo *dst); + + /** + * Migration processing. + * @ramList and @ramNum are deprecated. + */ + int32_t ubturbo_ham_migrate(HamRamPages *ramList, size_t ramNum, int32_t step); + + /** + * Migration processing. + * Currently used only at the destination end for restoring pgtable attributes. + */ + int32_t ubturbo_ham_pgtable_modify(bool cacheable); + + /** + * Migration postprocessing. + * Mainly cleans up temporary resources, such as closing the HAM device... 
+ */ + void ubturbo_ham_unregister(void); + + /** + * Migration failed. + * Provides resource cleanup interfaces to libvirt + * (including page rollback or management resource cleanup). + */ + int32_t ubturbo_ham_rollback(pid_t srcPid); + + /** + * Used to set a custom log printing function with the same type as the function defined above + * @param logFunc + */ + typedef void (*ExternalLog)(int level, const char *funcname, int linenr, const char *logBuf); + +Usage: +1. Configure the environment according to the HAM user manual: http://gitee.com/openeuler/ham/blob/master/doc/User_Guide.md +2. Add the '--ldst' flag to enable HAM-based migration + +Example: +virsh migrate $vm_name --live qemu+tcp://$ip/system tcp://$ip/system --verbose --unsafe --p2p --ldst + +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + Kconfig.host | 3 + + meson.build | 8 + + meson_options.txt | 3 + + migration/ham.c | 273 ++++++++++++++++++++++++++++++++++ + migration/ham.h | 82 ++++++++++ + migration/meson.build | 4 + + migration/migration.c | 11 +- + migration/ram.c | 35 ++++- + migration/savevm.c | 8 + + scripts/meson-buildoptions.sh | 5 + + 10 files changed, 427 insertions(+), 5 deletions(-) + create mode 100644 migration/ham.c + create mode 100644 migration/ham.h + +diff --git a/Kconfig.host b/Kconfig.host +index e124f33231..f60ea6cef9 100644 +--- a/Kconfig.host ++++ b/Kconfig.host +@@ -56,3 +56,6 @@ config HV_BALLOON_POSSIBLE + + config UB + bool ++ ++config HAM_MIGRATION ++ bool +\ No newline at end of file +diff --git a/meson.build b/meson.build +index 1dcfb794fa..1a09fefc6b 100644 +--- a/meson.build ++++ b/meson.build +@@ -585,6 +585,11 @@ have_ub = get_option('ub') \ + .require(targetos == 'linux', error_message: 'UB is supported only on Linux') \ + .allowed() + ++# ham migration ++have_ham_migration = get_option('ham_migration') \ ++ .require(targetos == 'linux', error_message: 'ham_migration is supported only on Linux') \ ++ .allowed() ++ + # vhost + have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(targetos != 'linux') \ +@@ -2293,6 +2298,7 @@ config_host_data.set('CONFIG_VHOST_KERNEL', have_vhost_kernel) + config_host_data.set('CONFIG_VHOST_USER', have_vhost_user) + config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto) + config_host_data.set('CONFIG_UB', have_ub) ++config_host_data.set('CONFIG_HAM_MIGRATION', have_ham_migration) + config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa) + config_host_data.set('CONFIG_VMNET', vmnet.found()) + config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server) +@@ -3006,6 +3012,7 @@ host_kconfig = \ + (opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \ + (x11.found() ? ['CONFIG_X11=y'] : []) + \ + (have_ub ? ['CONFIG_UB=y'] : []) + \ ++ (have_ham_migration ? ['CONFIG_HAM_MIGRATION=y'] : []) + \ + (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ + (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ + (have_vhost_kernel ? 
['CONFIG_VHOST_KERNEL=y'] : []) + \ +@@ -4236,6 +4243,7 @@ summary_info += {'D-Bus display': dbus_display} + summary_info += {'QOM debugging': get_option('qom_cast_debug')} + summary_info += {'Relocatable install': get_option('relocatable')} + summary_info += {'ub support': have_ub} ++summary_info += {'ham migration support': have_ham_migration} + summary_info += {'vhost-kernel support': have_vhost_kernel} + summary_info += {'vhost-net support': have_vhost_net} + summary_info += {'vhost-user support': have_vhost_user} +diff --git a/meson_options.txt b/meson_options.txt +index 6152543e5d..ea83306b8a 100644 +--- a/meson_options.txt ++++ b/meson_options.txt +@@ -294,6 +294,9 @@ option('sndio', type: 'feature', value: 'auto', + option('ub', type: 'feature', value: 'auto', + description: 'unify bus support') + ++option('ham_migration', type: 'feature', value: 'auto', ++ description: 'live migration via memory semantics') ++ + option('vhost_kernel', type: 'feature', value: 'auto', + description: 'vhost kernel backend support') + option('vhost_net', type: 'feature', value: 'auto', +diff --git a/migration/ham.c b/migration/ham.c +new file mode 100644 +index 0000000000..d7fd7f0134 +--- /dev/null ++++ b/migration/ham.c +@@ -0,0 +1,273 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Description: HAM: Migrate Operations ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu/cutils.h" ++#include "qemu/log.h" ++#include "qemu/units.h" ++#include "qapi/error.h" ++#include "migration.h" ++#include "dlfcn.h" ++#include "ram.h" ++#include "qapi/qapi-commands-migration.h" ++#include "exec/ramblock.h" ++#include "options.h" ++#include "qemu-file.h" ++#include "ham.h" ++ ++#define HAM_LIB_PATH "libham.so" ++ ++static void *handle_ham = NULL; ++ ++static ham_migration_ops ham_migration_ops_instance; ++ ++typedef struct dl_funcs { ++ const char *func_name; ++ void **func; ++} dl_funcs; ++ ++static dl_funcs ham_dlfunc_list[] = { ++ {.func_name = "ubturbo_ham_external_log_set", .func = (void**)&ham_migration_ops_instance.external_log_set}, ++ {.func_name = "ubturbo_ham_register", .func = (void**)&ham_migration_ops_instance.ham_register}, ++ {.func_name = "ubturbo_ham_migrate", .func = (void**)&ham_migration_ops_instance.migrate}, ++ {.func_name = "ubturbo_ham_pgtable_modify", .func = (void**)&ham_migration_ops_instance.pgtable_modify}, ++ {.func_name = "ubturbo_ham_unregister", .func = (void**)&ham_migration_ops_instance.ham_unregister}, ++ {.func_name = "ubturbo_ham_rollback", .func = (void**)&ham_migration_ops_instance.rollback}, ++}; ++ ++const char *log_level_str[] = { ++ "DEBUG", ++ "INFO", ++ "WARN", ++ "ERROR" ++}; ++ ++static int32_t g_migrate_round = 0; ++static HamNumaInfo g_dst_numa = { .num = 0 }; ++static HamRamInfo g_src_ram = { .num = 0 }; ++ ++bool ham_is_vm_ram(size_t page_size) ++{ ++ return page_size == PAGE_SIZE_2M; ++} ++ ++static void ham_dlfunc_list_set_null(void) ++{ ++ int num = sizeof(ham_dlfunc_list) / sizeof(ham_dlfunc_list[0]); ++ for (int i = 0; i < num; i++) { ++ *ham_dlfunc_list[i].func = NULL; ++ } ++} ++ ++static void ham_dlfunc_close(void) ++{ ++ if (handle_ham) { ++ (void)dlclose(handle_ham); ++ handle_ham = NULL; ++ } ++ ham_dlfunc_list_set_null(); ++} ++ ++static int ham_dlfunc_open(void) ++{ ++ char *error = NULL; ++ int num = sizeof(ham_dlfunc_list) / sizeof(ham_dlfunc_list[0]); ++ ++ ham_dlfunc_list_set_null(); ++ handle_ham = dlopen(HAM_LIB_PATH, RTLD_LAZY); ++ if (!handle_ham) { ++ qemu_log("HAM: dlopen error: %s\n", dlerror()); ++ 
return -1; ++ } ++ ++ for (size_t i = 0; i < num; i++) { ++ *ham_dlfunc_list[i].func = dlsym(handle_ham, ham_dlfunc_list[i].func_name); ++ if ((error = dlerror()) != NULL) { ++ qemu_log("HAM: dlsym error: %s while getting %s\n", error, ham_dlfunc_list[i].func_name); ++ ham_dlfunc_close(); ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ham_dlfunc_init(void) ++{ ++ int ret; ++ ++ ret = ham_dlfunc_open(); ++ if (ret < 0) { ++ qemu_log("HAM: open ham dlfunc failed\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ham_external_log(int level, const char *funcname, int linenr, const char *logBuf) ++{ ++ if (level >= HAM_LOG_DEBUG && level <= HAM_LOG_ERROR) { ++ qemu_log("[%s][%s:%d]:%s", log_level_str[level], funcname, linenr, logBuf); ++ } else { ++ qemu_log("[UNKNOWN][%s:%d]:%s", funcname, linenr, logBuf); ++ } ++} ++ ++static int ham_prepare(HamRamInfo *src) ++{ ++ int ret; ++ ++ ret = ham_dlfunc_init(); ++ if (ret) { ++ qemu_log("HAM: dlfunc init fail, ret:%d\n", ret); ++ return ret; ++ } ++ ++ g_migrate_round = 0; ++ ham_migration_ops_instance.external_log_set(ham_external_log); ++ ++ ret = ham_migration_ops_instance.ham_register(src, &g_dst_numa); ++ if (ret) { ++ qemu_log("HAM: start migration fail, ret:%d\n", ret); ++ return ret; ++ } ++ return 0; ++} ++ ++int ham_pages_commit(void) ++{ ++ int ret = ham_migration_ops_instance.migrate(NULL, 0, g_migrate_round); ++ if (ret != 0) { ++ qemu_log("HAM: page migration failed, ret:%d\n", ret); ++ return ret; ++ } ++ ++ g_migrate_round++; ++ return 0; ++} ++ ++static int ham_pages_rollback(void) ++{ ++ pid_t pid = getpid(); ++ int ret = ham_dlfunc_init(); ++ if (ret) { ++ return ret; ++ } ++ ++ ret = ham_migration_ops_instance.rollback(pid); ++ ham_dlfunc_close(); ++ return ret; ++} ++ ++static int ham_init_ram_blocks(HamRamInfo *ram_info) ++{ ++ RAMBlock *ram_block = NULL; ++ uint32_t uuid = 0; ++ ++ ram_info->pid = getpid(); ++ ram_info->num = 0; ++ WITH_RCU_READ_LOCK_GUARD() { ++ RAMBLOCK_FOREACH_MIGRATABLE(ram_block) { ++ if (!ham_is_vm_ram(ram_block->page_size)) { ++ continue; ++ } ++ if (uuid >= BATCH_NUM) { ++ qemu_log("HAM: ram block num exceeds, limit:%u\n", BATCH_NUM); ++ return -E2BIG; ++ } ++ ram_info->blockList[ram_info->num].uuid = uuid++; ++ ram_info->blockList[ram_info->num].hva = (uintptr_t)ram_block->host; ++ ram_info->blockList[ram_info->num].size = ram_block->used_length; ++ ram_info->num++; ++ } ++ } ++ return 0; ++} ++ ++void ham_migrate_prepare(MigrationState *s) ++{ ++ Error *err = NULL; ++ int ret; ++ ++ if (!migrate_use_ldst()) { ++ return; ++ } ++ ++ ret = ham_init_ram_blocks(&g_src_ram); ++ if (ret) { ++ error_setg(&err, "init ram block fail, ret:%d", ret); ++ goto fail; ++ } ++ ++ ret = ham_prepare(&g_src_ram); ++ if (ret) { ++ error_setg(&err, "migrate ham prepare fail, ret:%d", ret); ++ goto fail; ++ } ++ return; ++ ++fail: ++ migrate_set_error(s, err); ++ error_report_err(err); ++ qemu_file_set_error(s->to_dst_file, ret); ++} ++ ++void ham_migrate_cleanup(void) ++{ ++ if (!migrate_use_ldst()) { ++ return; ++ } ++ ++ ham_migration_ops_instance.ham_unregister(); ++ ham_dlfunc_close(); ++} ++ ++void ham_madvise_page(void) ++{ ++ RAMBlock *ram_block; ++ int64_t start, end; ++ ++ start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); ++ WITH_RCU_READ_LOCK_GUARD() { ++ RAMBLOCK_FOREACH_MIGRATABLE(ram_block) { ++ if (!ham_is_vm_ram(ram_block->page_size)) { ++ continue; ++ } ++ madvise(ram_block->host, ram_block->used_length, MADV_POPULATE_WRITE); ++ } ++ } ++ end = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 
++ qemu_log("HAM: madvise cost time:%ld ms\n", end - start); ++} ++ ++static int ham_modify_pgtable(void) ++{ ++ HamRamInfo ramInfo = { .num = 0 }; ++ ++ int ret = ham_dlfunc_init(); ++ if (ret) { ++ return ret; ++ } ++ ret = ham_init_ram_blocks(&ramInfo); ++ if (ret) { ++ goto close_dlfunc; ++ } ++ ret = ham_migration_ops_instance.ham_register(&ramInfo, NULL); ++ if (ret) { ++ qemu_log("HAM: start migration fail, ret:%d\n", ret); ++ goto stop_mig; ++ } ++ ret = ham_migration_ops_instance.pgtable_modify(true); ++ if (ret) { ++ qemu_log("HAM: modify pgtable fail, ret:%d\n", ret); ++ } ++ ++stop_mig: ++ ham_migration_ops_instance.ham_unregister(); ++close_dlfunc: ++ ham_dlfunc_close(); ++ return ret; ++} +\ No newline at end of file +diff --git a/migration/ham.h b/migration/ham.h +new file mode 100644 +index 0000000000..e1df3cc52f +--- /dev/null ++++ b/migration/ham.h +@@ -0,0 +1,82 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * Description: HAM: Migrate Operations ++ */ ++ ++#ifndef HAM_H ++#define HAM_H ++ ++#include ++#include ++#include ++#include ++#include "sysemu/kvm_int.h" ++#include "qapi/qapi-types-migration.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define PAGE_SIZE_2M (INT64_C(1) << 21) ++#define BATCH_NUM 4 ++ ++typedef enum { ++ HAM_LOG_DEBUG = 0, ++ HAM_LOG_ERROR = 3, ++} HamLogLevel; ++ ++typedef struct { ++ uint32_t uuid; ++ uintptr_t hva; ++ size_t size; ++} HamRamBlock; ++ ++typedef struct { ++ pid_t pid; ++ uint16_t scna; ++ uint32_t num; ++ HamRamBlock blockList[BATCH_NUM]; ++} HamRamInfo; ++ ++typedef struct { ++ uint32_t numaId; ++ size_t size; ++} HamNuma; ++ ++typedef struct { ++ pid_t pid; ++ uint32_t num; ++ HamNuma numaList[BATCH_NUM]; ++} HamNumaInfo; ++ ++typedef struct { ++ int32_t uuid; ++ size_t hvaNum; ++ uintptr_t *hvaList; ++} HamRamPages; ++ ++typedef void (*ExternalLog)(int level, const char *funcname, int linenr, const char *logBuf); ++ ++typedef struct ham_migration_ops { ++ void (*external_log_set)(ExternalLog logFunc); ++ int32_t (*ham_register)(HamRamInfo *src, HamNumaInfo *dst); ++ int32_t (*migrate)(HamRamPages *ramList, size_t ram_num, int32_t step); ++ void (*ham_unregister)(void); ++ int32_t (*rollback)(pid_t pid); ++ int32_t (*pgtable_modify)(bool cacheable); ++} ham_migration_ops; ++ ++int ham_pages_commit(void); ++ ++void ham_migrate_prepare(MigrationState *s); ++ ++void ham_migrate_cleanup(void); ++ ++bool ham_is_vm_ram(size_t page_size); ++ ++void ham_madvise_page(void); ++ ++#ifdef __cplusplus ++} ++#endif ++#endif +diff --git a/migration/meson.build b/migration/meson.build +index aba2581705..22e92bcedc 100644 +--- a/migration/meson.build ++++ b/migration/meson.build +@@ -31,6 +31,10 @@ system_ss.add(files( + 'threadinfo.c', + ), gnutls) + ++if get_option('ham_migration').allowed() ++ system_ss.add(files('ham.c')) ++endif ++ + if get_option('replication').allowed() + system_ss.add(files('colo-failover.c', 'colo.c')) + endif +diff --git a/migration/migration.c b/migration/migration.c +index 91b2267c3f..9feaa17a97 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -71,7 +71,9 @@ + #include "qemu/log-for-trace.h" + #include "sysemu/kvm.h" + #endif +- ++#ifdef CONFIG_HAM_MIGRATION ++#include "ham.h" ++#endif + #define DEFAULT_FD_MAX 4096 + + static NotifierList migration_state_notifiers = +@@ -3364,7 +3366,9 @@ static void *migration_thread(void *opaque) + qemu_mutex_lock_iothread(); + qemu_savevm_state_header(s->to_dst_file); + qemu_mutex_unlock_iothread(); +- ++#ifdef 
CONFIG_HAM_MIGRATION ++ ham_migrate_prepare(s); ++#endif + /* + * If we opened the return path, we need to make sure dst has it + * opened as well. +@@ -3438,6 +3442,9 @@ out: + object_unref(OBJECT(s)); + rcu_unregister_thread(); + migration_threads_remove(thread); ++#ifdef CONFIG_HAM_MIGRATION ++ ham_migrate_cleanup(); ++#endif + return NULL; + } + +diff --git a/migration/ram.c b/migration/ram.c +index b46de7cd6d..609c85f7a7 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -65,7 +65,9 @@ + #include "sysemu/dirtylimit.h" + #include "sysemu/kvm.h" + #include "exec/confidential-guest-support.h" +- ++#ifdef CONFIG_HAM_MIGRATION ++#include "ham.h" ++#endif + /* Defines RAM_SAVE_ENCRYPTED_PAGE and RAM_SAVE_SHARED_REGION_LIST */ + #include "target/i386/sev.h" + #include "target/i386/csv.h" +@@ -2591,6 +2593,23 @@ static int ram_save_csv3_pages(RAMState *rs, PageSearchStatus *pss) + return pages; + } + ++#ifdef CONFIG_HAM_MIGRATION ++static int ram_ham_migrate_page(PageSearchStatus *pss) ++{ ++ int pages = 0; ++ int ret; ++ ++ pss->page += (pss->block->used_length >> TARGET_PAGE_BITS); ++ pages += (pss->block->used_length >> TARGET_PAGE_BITS); ++ ret = ham_pages_commit(); ++ if (ret) { ++ return ret; ++ } ++ ++ return pages; ++} ++#endif ++ + /** + * ram_save_host_page: save a whole host page + * +@@ -2643,7 +2662,15 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) + + /* Update host page boundary information */ + pss_host_page_prepare(pss); +- ++#ifdef CONFIG_HAM_MIGRATION ++ if (migrate_use_ldst() && ham_is_vm_ram(pss->block->page_size)) { ++ pages = ram_ham_migrate_page(pss); ++ if (pages < 0) { ++ return pages; ++ } ++ goto completed; ++ } ++#endif + do { + page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); + +@@ -2682,7 +2709,9 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) + + pss_find_next_dirty(pss); + } while (pss_within_range(pss)); +- ++#ifdef CONFIG_HAM_MIGRATION ++completed: ++#endif + pss_host_page_finish(pss); + + res = ram_save_release_protection(rs, pss, start_page); +diff --git a/migration/savevm.c b/migration/savevm.c +index bde05eebe4..be3bfc1078 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -70,6 +70,9 @@ + #include "yank_functions.h" + #include "sysemu/qtest.h" + #include "options.h" ++#ifdef CONFIG_HAM_MIGRATION ++#include "ham.h" ++#endif + + const unsigned int postcopy_ram_discard_version; + +@@ -2766,6 +2769,11 @@ static int qemu_loadvm_state_setup(QEMUFile *f) + return ret; + } + } ++#ifdef CONFIG_HAM_MIGRATION ++ if (migrate_use_ldst()) { ++ ham_madvise_page(); ++ } ++#endif + return 0; + } + +diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh +index 7f8f043039..3b5a146afd 100644 +--- a/scripts/meson-buildoptions.sh ++++ b/scripts/meson-buildoptions.sh +@@ -54,6 +54,8 @@ meson_options_help() { + printf "%s\n" ' --enable-tsan enable thread sanitizer' + printf "%s\n" ' --enable-ub enable unify bus' + printf "%s\n" ' --disable-ub disable unify bus' ++ printf "%s\n" ' --enable-ham-migration enable ham migration' ++ printf "%s\n" ' --disable-ham-migration disable ham migration' + printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' + printf "%s\n" ' firmware]' + printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' +@@ -192,6 +194,7 @@ meson_options_help() { + printf "%s\n" ' tpm TPM support' + printf "%s\n" ' u2f U2F emulation support' + printf "%s\n" ' ub unify bus support' ++ printf "%s\n" ' ham-migration live migration via 
memory semantics' + printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' vde vde network backend support' + printf "%s\n" ' vdi vdi image format support' +@@ -518,6 +521,8 @@ _meson_option_parse() { + --disable-u2f) printf "%s" -Du2f=disabled ;; + --enable-ub) printf "%s" -Dub=enabled ;; + --disable-ub) printf "%s" -Dub=disabled ;; ++ --enable-ham-migration) printf "%s" -Dham_migration=enabled ;; ++ --disable-ham-migration) printf "%s" -Dham_migration=disabled ;; + --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; + --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-vde) printf "%s" -Dvde=enabled ;; +-- +2.33.0 + diff --git a/migration-ham-add-deterministic-migration-informatio.patch b/migration-ham-add-deterministic-migration-informatio.patch new file mode 100644 index 0000000000000000000000000000000000000000..9227be42306f2d09b69c998959451546bfb3b755 --- /dev/null +++ b/migration-ham-add-deterministic-migration-informatio.patch @@ -0,0 +1,227 @@ +From ec1cc2039e6b3412cf311c8919288175bf9114eb Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Wed, 19 Nov 2025 19:59:44 +0800 +Subject: [PATCH 3/6] migration/ham: add deterministic migration information + transmission QMP interface + +Add the following QMP interface: +qmp_query_ramblock: Query virtual machine memory information so that libvirt can initiate memory borrowing later. +qmp_recv_rmtnuma: Receive NUMA information for borrowed memory coming online. +qmp_rollback_pages: If migration fails, roll back the migrated pages. +qmp_modify_pgtable: Modify page table attributes + +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + migration/ham.c | 72 ++++++++++++++++++++++++++++ + qapi/migration.json | 112 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 184 insertions(+) + +diff --git a/migration/ham.c b/migration/ham.c +index d7fd7f0134..be487892da 100644 +--- a/migration/ham.c ++++ b/migration/ham.c +@@ -270,4 +270,76 @@ stop_mig: + close_dlfunc: + ham_dlfunc_close(); + return ret; ++} ++ ++static void free_ram_info_list(RamInfoList *list) ++{ ++ RamInfoList *tmp; ++ for (tmp = list; tmp; tmp = tmp->next) { ++ g_free(tmp->value); ++ } ++} ++ ++struct VmInfo *qmp_query_ramblock(Error **errp) ++{ ++ RAMBlock *ram_block = NULL; ++ VmInfo *vm_info = g_new0(VmInfo, 1); ++ RamInfo *info = NULL; ++ RamInfoList *head = NULL, **tail = &head; ++ uint32_t uuid = 0; ++ WITH_RCU_READ_LOCK_GUARD() { ++ RAMBLOCK_FOREACH_MIGRATABLE(ram_block) { ++ if (!ham_is_vm_ram(ram_block->page_size)) { ++ continue; ++ } ++ if (uuid >= BATCH_NUM) { ++ free_ram_info_list(head); ++ g_free(vm_info); ++ error_setg(errp, "Ram block num exceeds, limit:%u", BATCH_NUM); ++ return NULL; ++ } ++ info = g_malloc0(sizeof(*info)); ++ info->uuid = uuid++; ++ info->hva = (uintptr_t)ram_block->host; ++ info->size = ram_block->used_length; ++ qemu_log("HAM: uuid:%d size:%lu\n", info->uuid, info->size); ++ QAPI_LIST_APPEND(tail, info); ++ } ++ } ++ vm_info->pid = getpid(); ++ vm_info->block = head; ++ qemu_log("HAM: pid = %ld\n", vm_info->pid); ++ return vm_info; ++} ++ ++void qmp_recv_rmtnuma(int64_t pid, uint16_t scna, NumaInfoList *numa_info, Error **errp) ++{ ++ int num = 0; ++ g_dst_numa.pid = pid; ++ g_src_ram.scna = scna; ++ while (numa_info != NULL) { ++ g_dst_numa.numaList[num].numaId = numa_info->value->numa_id; ++ g_dst_numa.numaList[num].size = numa_info->value->size; ++ num++; ++ numa_info = numa_info->next; ++ } ++ g_dst_numa.num = num; ++} ++ ++void 
qmp_rollback_pages(Error **errp) ++{ ++ if (ham_pages_rollback()) { ++ error_setg(errp, "rollback pages failed"); ++ return; ++ } ++ qemu_log("HAM: completed rollback pages\n"); ++} ++ ++void qmp_modify_pgtable(Error **errp) ++{ ++ if (ham_modify_pgtable()) { ++ error_setg(errp, "modify pgtable failed"); ++ return; ++ } ++ qemu_log("HAM: completed modify pgtable\n"); + } +\ No newline at end of file +diff --git a/qapi/migration.json b/qapi/migration.json +index 4ff473b680..c836bb3f85 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -2754,3 +2754,115 @@ + 'data': { 'job-id': 'str', + 'tag': 'str', + 'devices': ['str'] } } ++ ++## ++# @RamInfo: ++# ++# Specifies the memory address segment of the VM. ++# ++# @uuid: indicates the NUMA index of the VM. ++# ++# @hva: indicates the start address of each NUMA segment of a VM. ++# ++# @size: size of each NUMA segment on a VM. ++# ++# Since: 8.2 ++## ++{ 'struct': 'RamInfo', ++'data': { 'uuid': 'uint32', ++'hva': 'uint64', ++'size': 'size' } } ++ ++## ++# @VmInfo: ++# ++# Basic VM information. ++# ++# @pid: pid of the VM. ++# ++# @block: specifies the memory address segment of the VM. ++# ++# Since: 8.2 ++## ++{ 'struct': 'VmInfo', ++'data': { 'pid': 'int', ++'block': ['RamInfo'] } } ++ ++## ++# @query-ramblock: ++# ++# Returns info of RAM blocks in the VM. ++# ++# Example: ++# -> {"execute": "query-ramblock"} ++# <- {"return": {"block": [{"hva": 281470478319616, "uuid": 0, "size": 1073741824}, ++# {"hva": 281469402480640, "uuid": 1, "size": 1073741824}], ++# "pid": 25643}} ++# ++# Since: 8.2 ++## ++{ 'command': 'query-ramblock', 'returns': 'VmInfo' } ++ ++## ++# @NumaInfo: ++# ++# NUMA information reported by the remote memory. ++# ++# @numa-id: NUMA id reported by the remote memory. ++# ++# @size: NUMA size reported by the remote memory. ++# ++# Since: 8.2 ++## ++{ 'struct': 'NumaInfo', ++'data': { 'numa-id': 'uint32', ++'size': 'size' } } ++ ++## ++# @recv-rmtnuma: ++# ++# Returns info of RAM blocks in the VM. ++# ++# Example: ++# -> { "execute": "recv-rmtnuma", ++# "arguments": { ++# "pid": 25643, ++# 'scna': 1, ++# "block":[{"numa-id": 5, "size": 1073741824}, ++# {"numa-id": 6, "size": 1073741824}] ++# } ++# } ++# <- { "return": { } } ++# ++# Since: 8.2 ++## ++{ 'command': 'recv-rmtnuma', ++'data': { 'pid': 'int', ++'scna': 'uint16', ++'block': ['NumaInfo'] } } ++ ++## ++# @rollback-pages: ++# ++# Roll back migrated pages when the migration fails. ++# ++# Example: ++# -> { "execute": "rollback-pages" } ++# <- { "return": { } } ++# ++# Since: 8.2 ++## ++{ 'command': 'rollback-pages' } ++ ++## ++# @modify-pgtable: ++# ++# The attribute of the VM memory page table is changed from invalid to cacheable. 
++# ++# Example: ++# -> { "execute": "modify-pgtable" } ++# <- { "return": { } } ++# ++# Since: 8.2 ++## ++{ 'command': 'modify-pgtable' } +\ No newline at end of file +-- +2.33.0 + diff --git a/migration-ham-modify-the-shutdown-criteria-for-deter.patch b/migration-ham-modify-the-shutdown-criteria-for-deter.patch new file mode 100644 index 0000000000000000000000000000000000000000..0c5ef030dcb5acadabcd865d97bff876b2f849d4 --- /dev/null +++ b/migration-ham-modify-the-shutdown-criteria-for-deter.patch @@ -0,0 +1,103 @@ +From 45b5bfb8f951c2d94986562e6a163d153062533c Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Wed, 19 Nov 2025 20:10:16 +0800 +Subject: [PATCH 4/6] migration/ham: modify the shutdown criteria for + deterministic migration + +Deterministic vm live migration leverages memory pooling capabilities to bring the destination vm's memory online to the source OS and utilizes the kernel's migrate_pages interface to migrate the memory. +During this process, the vm is kept alive through remote memory access capabilities. +No dirty pages are generated during the migration, so only one iteration of migration is required. + +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + migration/ham.c | 9 +++++++++ + migration/ham.h | 2 ++ + migration/migration.c | 7 +++++++ + migration/migration.h | 2 ++ + 4 files changed, 20 insertions(+) + +diff --git a/migration/ham.c b/migration/ham.c +index be487892da..3e90091834 100644 +--- a/migration/ham.c ++++ b/migration/ham.c +@@ -272,6 +272,15 @@ close_dlfunc: + return ret; + } + ++bool ham_should_complete_migration(MigrationState *s) ++{ ++ /* Only trigger completion when: ++ * 1. HAM (ldst) mode is enabled ++ * 2. At least one iteration has been completed ++ */ ++ return migrate_use_ldst() && s->iteration_num >= 1; ++} ++ + static void free_ram_info_list(RamInfoList *list) + { + RamInfoList *tmp; +diff --git a/migration/ham.h b/migration/ham.h +index e1df3cc52f..5a815a61f6 100644 +--- a/migration/ham.h ++++ b/migration/ham.h +@@ -76,6 +76,8 @@ bool ham_is_vm_ram(size_t page_size); + + void ham_madvise_page(void); + ++bool ham_should_complete_migration(MigrationState *s); ++ + #ifdef __cplusplus + } + #endif +diff --git a/migration/migration.c b/migration/migration.c +index 9feaa17a97..9e71a2566b 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -1623,6 +1623,7 @@ int migrate_init(MigrationState *s, Error **errp) + s->threshold_size = 0; + s->switchover_acked = false; + s->rdma_migration = false; ++ s->iteration_num = 0; + /* + * set mig_stats memory to zero for a new migration + */ +@@ -3146,7 +3147,12 @@ static MigIterateState migration_iteration_run(MigrationState *s) + trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); + } + ++#ifdef CONFIG_HAM_MIGRATION ++ if (((!pending_size || pending_size < s->threshold_size) && can_switchover) || ++ ham_should_complete_migration(s)) { ++#else + if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { ++#endif + trace_migration_thread_low_pending(pending_size); + migration_completion(s); + return MIG_ITERATE_BREAK; +@@ -3162,6 +3168,7 @@ static MigIterateState migration_iteration_run(MigrationState *s) + return MIG_ITERATE_SKIP; + } + ++ s->iteration_num++; + /* Just another iteration step */ + qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); + return MIG_ITERATE_RESUME; +diff --git a/migration/migration.h b/migration/migration.h +index 66fe4dd799..46f0c37fec 100644 +--- 
a/migration/migration.h ++++ b/migration/migration.h +@@ -470,6 +470,8 @@ struct MigrationState { + bool switchover_acked; + /* Is this a rdma migration */ + bool rdma_migration; ++ /* Number of migration iterations */ ++ uint64_t iteration_num; + }; + + void migrate_set_state(int *state, int old_state, int new_state); +-- +2.33.0 + diff --git a/migration-support-ldst-migration.patch b/migration-support-ldst-migration.patch new file mode 100644 index 0000000000000000000000000000000000000000..eaf86af34de5c18620898f1e4fb73d69bce8e563 --- /dev/null +++ b/migration-support-ldst-migration.patch @@ -0,0 +1,105 @@ +From dd448f423355f1b9ea282d67bd130dfffcd434f7 Mon Sep 17 00:00:00 2001 +From: lucas <1358497393@qq.com> +Date: Wed, 19 Nov 2025 19:22:12 +0800 +Subject: [PATCH 1/6] migration: support ldst migration + +Adding the ldst parameter enables the virtual machine's memory to be migrated to borrowed memory via migrate_pages, thus completing the live migration of the virtual machine. + +Example: +virsh migrate $vm_name --live qemu+tcp://$ip/system tcp://$ip/system --verbose --unsafe --p2p --ldst + +Signed-off-by: lujun +Reviewed-by: jindou +Reviewed-by: zhaobing +Reviewed-by: lizhilong +--- + migration/options.c | 11 ++++++++++- + migration/options.h | 1 + + qapi/migration.json | 9 +++++++-- + 3 files changed, 18 insertions(+), 3 deletions(-) + +diff --git a/migration/options.c b/migration/options.c +index 136a8575df..01c9a93adb 100644 +--- a/migration/options.c ++++ b/migration/options.c +@@ -227,6 +227,7 @@ Property migration_properties[] = { + DEFINE_PROP_MIG_CAP("x-switchover-ack", + MIGRATION_CAPABILITY_SWITCHOVER_ACK), + DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT), ++ DEFINE_PROP_MIG_CAP("x-ldst", MIGRATION_CAPABILITY_LDST), + DEFINE_PROP_END_OF_LIST(), + }; + +@@ -335,6 +336,13 @@ bool migrate_postcopy_ram(void) + return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; + } + ++bool migrate_use_ldst(void) ++{ ++ MigrationState *s = migrate_get_current(); ++ ++ return s->capabilities[MIGRATION_CAPABILITY_LDST]; ++} ++ + bool migrate_rdma_pin_all(void) + { + MigrationState *s = migrate_get_current(); +@@ -475,7 +483,8 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot, + MIGRATION_CAPABILITY_XBZRLE, + MIGRATION_CAPABILITY_X_COLO, + MIGRATION_CAPABILITY_VALIDATE_UUID, +- MIGRATION_CAPABILITY_ZERO_COPY_SEND); ++ MIGRATION_CAPABILITY_ZERO_COPY_SEND, ++ MIGRATION_CAPABILITY_LDST); + + static bool migrate_incoming_started(void) + { +diff --git a/migration/options.h b/migration/options.h +index 6b2a893217..92cc79cb26 100644 +--- a/migration/options.h ++++ b/migration/options.h +@@ -46,6 +46,7 @@ bool migrate_validate_uuid(void); + bool migrate_xbzrle(void); + bool migrate_zero_blocks(void); + bool migrate_zero_copy_send(void); ++bool migrate_use_ldst(void); + + /* + * pseudo capabilities +diff --git a/qapi/migration.json b/qapi/migration.json +index 37e1d4857e..4ff473b680 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -461,6 +461,10 @@ + # and target or migration will not even start. NOTE: If the + # migration fails during postcopy the VM will fail. (since 2.6) + # ++# @ldst: If enabled, the memory of virtual machine will be migrated ++# to the borrowed memory via migrate_pages, completing the live ++# migration of the virtual machine. 
(Since 8.2) ++# + # @x-colo: If enabled, migration will never end, and the state of the + # VM on the primary side will be migrated continuously to the VM + # on secondary side, this process is called COarse-Grain LOck +@@ -546,7 +550,7 @@ + { 'enum': 'MigrationCapability', + 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', + { 'name': 'compress', 'features': [ 'deprecated' ] }, +- 'events', 'postcopy-ram', ++ 'events', 'postcopy-ram', 'ldst', + { 'name': 'x-colo', 'features': [ 'unstable' ] }, + 'release-ram', + { 'name': 'block', 'features': [ 'deprecated' ] }, +@@ -609,7 +613,8 @@ + # {"state": false, "capability": "compress"}, + # {"state": true, "capability": "events"}, + # {"state": false, "capability": "postcopy-ram"}, +-# {"state": false, "capability": "x-colo"} ++# {"state": false, "capability": "x-colo"}, ++# {"state": false, "capability": "ldst"} + # ]} + ## + { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} +-- +2.33.0 + diff --git a/qemu.spec b/qemu.spec index d223aa121af6393ff597341b40bbe5972c996810..e8c7697be63aa807315c701dcf3a9d89f9e072e0 100644 --- a/qemu.spec +++ b/qemu.spec @@ -3,7 +3,7 @@ Name: qemu Version: 8.2.0 -Release: 53 +Release: 54 Epoch: 11 Summary: QEMU is a generic and open source machine emulator and virtualizer License: GPLv2 and BSD and MIT and CC-BY-SA-4.0 @@ -1136,6 +1136,74 @@ Patch1119: target-arm-kvm-Set-DISCOVER_IMPL_-hypercalls.patch Patch1120: Fix-the-compilation-of-target-arm-softmmu.patch Patch1121: trace-Add-trace-to-measure-hot-migrate-downtime.patch Patch1122: backends-Add-support-of-one-guest-numa-node-alloc-me.patch +Patch1123: system-add-support-of-hugepage-use-on-demand.patch +Patch1124: ub-support-enable-or-disable-ub-feature.patch +Patch1125: ub-add-bus-controller-state-for-ub-init.patch +Patch1126: ub-realize-base-ub-device-framework.patch +Patch1127: ub-add-base-ubbus-framework.patch +Patch1128: ub-acpi-introduce-acpi-header.patch +Patch1129: ub-acpi-introduce-ub-mem-header-file.patch +Patch1130: ub-add-mem-reserve-help-function.patch +Patch1131: ub-support-ubc-device.patch +Patch1132: ub-support-ub-acpi-report.patch +Patch1133: ub-adapter-acpi-change-for-last-commit.patch +Patch1134: ub-config-introduce-ub-config-base-framework.patch +Patch1135: ub-do-more-realize-for-ub-device.patch +Patch1136: ub-port-setup-ub-port-info.patch +Patch1137: ub-init-ub-bus-controller-dev-config-space.patch +Patch1138: ub-realize-ub-fm-memory-region-ops.patch +Patch1139: ub-support-ubc-msg-process.patch +Patch1140: ub-realize-ub-config-space-msg-process.patch +Patch1141: ub-support-ub-cna-mgmt-msg-process.patch +Patch1142: ub-support-enum-msg-process.patch +Patch1143: ub-support-sec-msg-process.patch +Patch1144: ub-realize-more-for-ubbus-and-realize-ub-ers-update.patch +Patch1145: ub-extract-common-mmu-translate.patch +Patch1146: ub-add-ummu-base-framework.patch +Patch1147: ub-use-ummu-and-init-ummu-registers.patch +Patch1148: ub-ummu-realize-some-ummu-read-write-process.patch +Patch1149: ub-support-mcmdq-process.patch +Patch1150: ub-ummu-supprot-create-kvtbl-and-del-kvtbl-mcmdq-pro.patch +Patch1151: ub-support-mcmdq-sync-handler.patch +Patch1152: migration-support-ldst-migration.patch +Patch1153: migration-ham-a-new-approach-to-vm-live-migration-de.patch +Patch1154: migration-ham-add-deterministic-migration-informatio.patch +Patch1155: migration-ham-modify-the-shutdown-criteria-for-deter.patch +Patch1156: migration-ham-HAM-migration-memory-cancellation-dirt.patch +Patch1157: 
migration-CONFIG_HAM_MIGRATION-is-associated-with-th.patch +Patch1158: ub-add-kvm-irqchip-usi-route-help-function.patch +Patch1159: ub-just-default-build-on-aarch64-machine.patch +Patch1160: ub-ummu-glb-int-enable.patch +Patch1161: ub-ummu-add-ummu_ops-for-vfio-ub-preprare.patch +Patch1162: ub-ummu-realize-config-tecte-mcmdq-process.patch +Patch1163: ub-realize-more-mcmd-process.patch +Patch1164: ub-support-ummu-dma-address-translate.patch +Patch1165: ub-add-lock-for-bus-instance.patch +Patch1166: backends: Add support of one guest numa node alloc me2.patch +Patch1167: ub-add-some-common-function-for-later-vfio-ub-realiz.patch +Patch1168: ub-add-function-for-later-alloc-idev-ers-addr.patch +Patch1169: ub-add-bus-instance-verify-for-vfio-ub-prepare.patch +Patch1170: ub-prepare-some-function-for-later-vfio-ub-realize.patch +Patch1171: ub-init-usi-operator-function.patch +Patch1172: ub-add-base-vfio-ub-device-framework.patch +Patch1173: ub-realize-detail-for-vfio-ub-device.patch +Patch1174: ub-realize-ubbus-and-udev-reset-callbalk.patch +Patch1175: ub-add-qapi-query-ub-for-get-ub-info.patch +Patch1176: ub-add-bitmap_scnprintf-help-function-for-later-hmp-.patch +Patch1177: ub-and-some-hmp-cmd-for-query-ub-info.patch +Patch1178: ub-realize-vfio-ers-exit.patch +Patch1179: ub-code-reinforcement.patch +Patch1180: ub-fix-ummu_dev_set_iommu_dev-s-return.patch +Patch1181: ub-switch-name-fers-to-ers.patch +Patch1182: ub-fix-use-gpa-instead-of-hva-to-R-W-msgq-data-probl.patch +Patch1183: system-add-return-false-when-all_subpages_in_hugepag.patch +Patch1184: Migration-support-the-basic-framework-of-URMA-migrat.patch +Patch1185: Migration-support-send-data-through-urma-protocol-du.patch +Patch1186: Migration-support-onecopy-migration.patch +Patch1187: Migration-support-skip-GPU-pixman-image-I-O-during-U.patch +Patch1188: Migration-support-devices-parallel-feature.patch +Patch1189: Migration-support-skip-GPU-pixman-image-I-O-during-H.patch +Patch1190: backends-fix-memory-leak-in-the-function-host_memory.patch BuildRequires: flex BuildRequires: gcc @@ -1197,6 +1265,9 @@ BuildRequires: spice-server-devel BuildRequires: qatzip-devel BuildRequires: intel-qpl-devel %endif +%ifarch aarch64 +BuildRequires: umdk-urma-devel +%endif BuildRequires: glibc-static glib2-static zlib-static libatomic-static @@ -1454,6 +1525,9 @@ cd ../ --enable-zstd \ --disable-brlapi \ --disable-plugins \ +%ifarch aarch64 + --enable-urma-migration \ +%endif --enable-debug make %{?_smp_mflags} $buildldflags V=1 @@ -1903,6 +1977,77 @@ getent passwd qemu >/dev/null || \ /bin/systemctl try-restart systemd-binfmt.service &>/dev/null || : %changelog +* Wed Nov 26 2025 Pengrui Zhang - 11:8.2.0-54 +- Add BuildRequires umdk-urma-devel ifarch aarch64 +- system: add support of hugepage use on demand +- ub: support enable or disable ub feature +- ub: add bus controller state for ub init +- ub: realize base ub device framework +- ub: add base ubbus framework +- ub acpi: introduce acpi header +- ub acpi: introduce ub mem header file +- ub: add mem reserve help function +- ub: support ubc device +- ub: support ub acpi report +- ub: adapter acpi change for last commit +- ub config: introduce ub config base framework +- ub: do more realize for ub device +- ub port: setup ub port info +- ub: init ub bus controller dev config space +- ub: realize ub fm memory region ops +- ub: support ubc msg process +- ub: realize ub config space msg process +- ub: support ub cna mgmt msg process +- ub: support enum msg process +- ub: support sec msg process +- ub: 
realize more for ubbus and realize ub ers update +- ub: extract common mmu translate +- ub: add ummu base framework +- ub: use ummu and init ummu registers +- ub ummu: realize some ummu read/write process +- ub: support mcmdq process +- ub: ummu supprot create kvtbl and del kvtbl mcmdq process +- ub: support mcmdq sync handler +- migration: support ldst migration +- migration/ham: a new approach to vm live migration: deterministic migration +- migration/ham: add deterministic migration information transmission QM… +- migration/ham: modify the shutdown criteria for deterministic migration +- migration/ham: HAM migration memory cancellation dirty page synchronization +- migration: CONFIG_HAM_MIGRATION is associated with the aarch64 architecture. +- ub: add kvm irqchip usi route help function +- ub: just default build on aarch64 machine +- ub: ummu glb int enable +- ub: ummu add ummu_ops for vfio-ub preprare +- ub: ummu realize config tecte mcmdq process +- ub: realize more mcmd process +- ub: support ummu dma address translate +- ub: add lock for bus instance +- backends: Add support of one guest numa node alloc memory from multi host nodes +- ub: add some common function for later vfo-ub realize +- ub: add function for later alloc idev ers addr +- ub: add bus instance verify for vfo-ub prepare +- ub: prepare some function for later vfio-ub realize +- ub: init usi operator function +- ub: add base vfio-ub device framework +- ub: realize detail for vfio-ub device +- ub: realize ubbus and udev reset callbalk +- ub: add qapi query-ub for get ub info +- ub: add bitmap_scnprintf help function for later hmp cmd +- ub: and some hmp cmd for query ub info +- ub: realize vfio ers exit +- ub: code reinforcement +- ub: fix ummu_dev_set_iommu_dev's return +- ub: switch name fers to ers +- ub: fix use gpa instead of hva to R/W msgq data problem +- system: add return false when all_subpages_in_hugepage_freed has invalid parameters +- Migration: support the basic framework of URMA migration +- Migration: support send data through urma protocol during migration +- Migration: support onecopy migration +- Migration: support skip GPU pixman image I/O during URMA migration +- Migration: support devices-parallel feature +- Migration: support skip GPU pixman image I/O during HAM migration +- backends: fix memory leak in the function host_memory_backend_memory_complete + * Sat Nov 22 2025 huangyan - 11:8.2.0-53 - Fix qemu-user(-static) binfmt configs causing install-time errors diff --git a/system-add-return-false-when-all_subpages_in_hugepag.patch b/system-add-return-false-when-all_subpages_in_hugepag.patch new file mode 100644 index 0000000000000000000000000000000000000000..064eba399f39d0ef6290f94fd6b7c8a5b62d1564 --- /dev/null +++ b/system-add-return-false-when-all_subpages_in_hugepag.patch @@ -0,0 +1,32 @@ +From 60f2f8757c1221e40a171f0f558d6eab059416c5 Mon Sep 17 00:00:00 2001 +From: leizongkun +Date: Mon, 24 Nov 2025 16:02:57 +0800 +Subject: [PATCH] system: add return false when all_subpages_in_hugepage_freed + has invalid parameters + +In the requirement of hugepage use on demand, the function +all_subpages_in_hugepage_freed need to return false when +all_subpages_in_hugepage_freed has invalid parameters + +Signed-off-by: wangzhigang +Signed-off-by: zhangliang +Signed-off-by: leizongkun +--- + hw/virtio/virtio-balloon.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c +index 9f41b303b0..ce0f3c35b3 100644 +--- a/hw/virtio/virtio-balloon.c ++++ 
b/hw/virtio/virtio-balloon.c +@@ -134,6 +134,7 @@ static inline bool all_subpages_in_hugepage_freed(GlobalBalloonedPage *gbp, unsi + if (hugepage_index * ULONGS_PER_HUGEPAGE < gbp->page_nr) { + return bitmap_full(&gbp->freed_page_bitmap[hugepage_index * ULONGS_PER_HUGEPAGE], PAGES_IN_HUGEPAGE); + } ++ return false; + } + + static void mark_freed_subpage(RAMBlock *rb, ram_addr_t rb_offset) +-- +2.33.0 + diff --git a/system-add-support-of-hugepage-use-on-demand.patch b/system-add-support-of-hugepage-use-on-demand.patch new file mode 100644 index 0000000000000000000000000000000000000000..6926fc32409458e276871f8f4be5917d20ddfb85 --- /dev/null +++ b/system-add-support-of-hugepage-use-on-demand.patch @@ -0,0 +1,641 @@ +From 52e651e5ff84c5bf658ff00b01e70d178d41618f Mon Sep 17 00:00:00 2001 +From: leizongkun +Date: Tue, 11 Nov 2025 14:00:12 +0800 +Subject: [PATCH] system: add support of hugepage use on demand + +Optimize memory regions that use 2MB huge pages +and are not pre-allocated, this defers physical +memory allocation, reduces memory overhead, and +achieves on-demand memory usage when starting +virtual machines. + +Signed-off-by: wangzhigang +Signed-off-by: zhangliang +Signed-off-by: leizongkun +--- + accel/kvm/kvm-all.c | 40 +++++++ + hw/virtio/virtio-balloon.c | 190 ++++++++++++++++++++++++++++++++++ + include/exec/memory.h | 11 ++ + include/sysemu/kvm.h | 5 + + linux-headers/linux/kvm.h | 6 ++ + meson.build | 8 ++ + meson_options.txt | 3 + + migration/migration.c | 34 ++++++ + migration/migration.h | 4 + + migration/ram.c | 64 ++++++++++++ + scripts/meson-buildoptions.sh | 3 + + system/memory.c | 25 +++++ + 12 files changed, 393 insertions(+) + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index a321bf514c..8fb3f2eee7 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -513,6 +513,11 @@ static int kvm_mem_flags(MemoryRegion *mr) + if (readonly && kvm_readonly_mem_allowed) { + flags |= KVM_MEM_READONLY; + } ++#ifdef CONFIG_HUGEPAGE_POD ++ if (memory_region_is_huge_pod(mr)) { ++ flags |= KVM_MEM_HUGE_POD; ++ } ++#endif + return flags; + } + +@@ -4233,6 +4238,41 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp) + } + } + ++#ifdef CONFIG_HUGEPAGE_POD ++int kvm_update_touched_log(void) ++{ ++ return kvm_vm_ioctl(kvm_state, KVM_POD_TOUCHED_LOG, NULL); ++} ++ ++int kvm_clear_slot_dirty_bitmap(void *ram) ++{ ++ KVMState *s = kvm_state; ++ KVMMemoryListener *kml; ++ int i; ++ int ret = -1; ++ ++ if (!s) ++ return ret; ++ ++ kml = &s->memory_listener; ++ kvm_slots_lock(); ++ for (i = 0; i < s->nr_slots; i++) { ++ KVMSlot *mem = &kml->slots[i]; ++ ++ if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { ++ kvm_slot_reset_dirty_pages(mem); ++ ret = 0; ++ ++ qemu_log("Reset kvm slot dirty bitmap for ram %p", ram); ++ break; ++ } ++ } ++ kvm_slots_unlock(); ++ ++ return ret; ++} ++#endif ++ + void kvm_mark_guest_state_protected(void) + { + kvm_state->guest_state_protected = true; +diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c +index d004cf29d2..9f41b303b0 100644 +--- a/hw/virtio/virtio-balloon.c ++++ b/hw/virtio/virtio-balloon.c +@@ -39,6 +39,162 @@ + + #define BALLOON_PAGE_SIZE (1 << VIRTIO_BALLOON_PFN_SHIFT) + ++#ifdef CONFIG_HUGEPAGE_POD ++#define ULONGS_PER_HUGEPAGE 8 /* Number of unsigned longs per huge page in the bitmap */ ++static bool guest_enabled_fpr = false; ++ ++/* Set if guest support and enabled free-page-reporting */ ++static void set_guest_enabled_fpr(bool enabled) { ++ guest_enabled_fpr = enabled; ++} ++ ++/* 
Represent of a RAMBlock */ ++typedef struct GlobalBalloonedPage { ++ void *base_hva; /* start HVA of a RAMBlock */ ++ size_t page_nr; /* total 4KiB page count of a RAMBlock */ ++ unsigned long *freed_page_bitmap; /* every set bit represent a freed 4KiB page */ ++ int *hugepage_freed_pages; /* every element represent freed subpages count in a hugepage */ ++} GlobalBalloonedPage; ++ ++#define PAGES_IN_HUGEPAGE 512 ++#define HUGEPAGE_SHIFT 21 ++#define GBP_LIST_LENGTH 8 ++GlobalBalloonedPage *gbp_list[GBP_LIST_LENGTH] = { 0 }; ++ ++static GlobalBalloonedPage *find_gbp_by_addr(void *base_hva) ++{ ++ int i; ++ ++ for (i = 0; i < GBP_LIST_LENGTH; i++) { ++ GlobalBalloonedPage *gbp = gbp_list[i]; ++ if (gbp == NULL) { ++ continue; ++ } ++ ++ if (gbp->base_hva == base_hva) { ++ return gbp; ++ } ++ } ++ return NULL; ++} ++ ++static GlobalBalloonedPage *alloc_new_gbp(void *base_hva, ram_addr_t length) ++{ ++ int i; ++ ++ for (i = 0; i < GBP_LIST_LENGTH; i++) { ++ GlobalBalloonedPage *gbp = gbp_list[i]; ++ if (gbp == NULL) { ++ gbp = g_malloc0(sizeof(GlobalBalloonedPage)); ++ if (gbp == NULL) { ++ error_report("alloc memory for GlobalBalloonedPage failed"); ++ return NULL; ++ } ++ gbp->base_hva = base_hva; ++ gbp->page_nr = length >> VIRTIO_BALLOON_PFN_SHIFT; ++ gbp->freed_page_bitmap = bitmap_new(gbp->page_nr); ++ gbp->hugepage_freed_pages = g_malloc0(gbp->page_nr/PAGES_IN_HUGEPAGE * sizeof(int)); ++ ++ gbp_list[i] = gbp; ++ return gbp; ++ } ++ } ++ warn_report("gbp list is full, max length: %d", GBP_LIST_LENGTH); ++ ++ return NULL; ++} ++ ++static void free_gbp(void) ++{ ++ int i; ++ ++ for (i = 0; i < GBP_LIST_LENGTH; i++) { ++ GlobalBalloonedPage *gbp = gbp_list[i]; ++ if (gbp == NULL) { ++ continue; ++ } ++ ++ g_free(gbp->freed_page_bitmap); ++ g_free(gbp->hugepage_freed_pages); ++ g_free(gbp); ++ ++ gbp_list[i] = NULL; ++ } ++} ++ ++static inline void clear_subpages_in_hugepage(GlobalBalloonedPage *gbp, unsigned long hugepage_index) ++{ ++ if (hugepage_index * ULONGS_PER_HUGEPAGE < gbp->page_nr) { ++ bitmap_zero(&gbp->freed_page_bitmap[hugepage_index * ULONGS_PER_HUGEPAGE], PAGES_IN_HUGEPAGE); ++ } ++} ++ ++static inline bool all_subpages_in_hugepage_freed(GlobalBalloonedPage *gbp, unsigned long hugepage_index) ++{ ++ if (hugepage_index * ULONGS_PER_HUGEPAGE < gbp->page_nr) { ++ return bitmap_full(&gbp->freed_page_bitmap[hugepage_index * ULONGS_PER_HUGEPAGE], PAGES_IN_HUGEPAGE); ++ } ++} ++ ++static void mark_freed_subpage(RAMBlock *rb, ram_addr_t rb_offset) ++{ ++ void *base_hva = qemu_ram_get_host_addr(rb); ++ ram_addr_t length = qemu_ram_get_max_length(rb); ++ ram_addr_t rb_page_size = qemu_ram_pagesize(rb); ++ ram_addr_t rb_aligned_offset = QEMU_ALIGN_DOWN(rb_offset, rb_page_size); ++ unsigned long page_index = rb_offset >> VIRTIO_BALLOON_PFN_SHIFT; ++ unsigned long hugepage_index = rb_offset >> HUGEPAGE_SHIFT; ++ GlobalBalloonedPage *gbp = find_gbp_by_addr(base_hva); ++ if (gbp == NULL) { ++ gbp = alloc_new_gbp(base_hva, length); ++ if (gbp == NULL) { ++ return; ++ } ++ } ++ ++ /* When one subpage released by balloon, set the bit of this page */ ++ if (page_index < gbp->page_nr && !test_and_set_bit(page_index, gbp->freed_page_bitmap)) { ++ if (hugepage_index < (gbp->page_nr / PAGES_IN_HUGEPAGE)) { ++ gbp->hugepage_freed_pages[hugepage_index]++; ++ /* ++ * All bits have been set meaning that all subpages of a hugepage is freed ++ * by balloon, So we can release this hugepage back to Host. 
++ */ ++ if (gbp->hugepage_freed_pages[hugepage_index] == PAGES_IN_HUGEPAGE) { ++ clear_subpages_in_hugepage(gbp, hugepage_index); ++ gbp->hugepage_freed_pages[hugepage_index] = 0; ++ ++ /* Release this hugepage back to Host */ ++ ram_block_discard_range(rb, rb_aligned_offset, rb_page_size); ++ } ++ } ++ } ++} ++ ++static void mark_used_subpage(RAMBlock *rb, ram_addr_t rb_offset) ++{ ++ void *base_hva = qemu_ram_get_host_addr(rb); ++ unsigned long page_index = rb_offset >> VIRTIO_BALLOON_PFN_SHIFT; ++ unsigned long hugepage_index = rb_offset >> HUGEPAGE_SHIFT; ++ GlobalBalloonedPage *gbp = find_gbp_by_addr(base_hva); ++ if (gbp == NULL) { ++ warn_report("Couldn't find gbp of rb_offset 0x%lx\n", rb_offset); ++ return; ++ } ++ ++ /* ++ * When one subpage deflated back to the Guest, clear the bit of this page. ++ * This means that this subpage could be used by Guest, so we cannot ++ * release to Host by mark_freed_subpage. ++ */ ++ if (page_index < gbp->page_nr && test_and_clear_bit(page_index, gbp->freed_page_bitmap)) { ++ if (hugepage_index < (gbp->page_nr / PAGES_IN_HUGEPAGE)) { ++ gbp->hugepage_freed_pages[hugepage_index]--; ++ } ++ } ++} ++#endif ++ + typedef struct PartiallyBalloonedPage { + ram_addr_t base_gpa; + unsigned long *bitmap; +@@ -92,6 +248,14 @@ static void balloon_inflate_page(VirtIOBalloon *balloon, + rb = qemu_ram_block_from_host(addr, false, &rb_offset); + rb_page_size = qemu_ram_pagesize(rb); + ++#ifdef CONFIG_HUGEPAGE_POD ++ if (rb_page_size == (1 << HUGEPAGE_SHIFT)) { ++ /* 2M pagesize case */ ++ mark_freed_subpage(rb, rb_offset); ++ return; ++ } ++#endif ++ + if (rb_page_size == BALLOON_PAGE_SIZE) { + /* Easy case */ + +@@ -157,6 +321,14 @@ static void balloon_deflate_page(VirtIOBalloon *balloon, + rb = qemu_ram_block_from_host(addr, false, &rb_offset); + rb_page_size = qemu_ram_pagesize(rb); + ++#ifdef CONFIG_HUGEPAGE_POD ++ if (rb_page_size == (1 << HUGEPAGE_SHIFT)) { ++ /* 2M pagesize case */ ++ mark_used_subpage(rb, rb_offset); ++ return; ++ } ++#endif ++ + host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1)); + + /* When a page is deflated, we hint the whole host page it lives +@@ -257,6 +429,14 @@ static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, + goto out_end; + } + for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) { ++#ifdef CONFIG_HUGEPAGE_POD ++ if (guest_enabled_fpr && i == VIRTIO_BALLOON_S_CACHES) { ++ if (i < VIRTIO_BALLOON_S_NR) { ++ s->stats[i] |= 1024; ++ } ++ } ++#endif ++ + if (!visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], errp)) { + goto out_nested; + } +@@ -379,6 +559,10 @@ static void virtio_balloon_handle_report(VirtIODevice *vdev, VirtQueue *vq) + ram_block_discard_range(rb, ram_offset, size); + } + ++#ifdef CONFIG_HUGEPAGE_POD ++ set_guest_enabled_fpr(true); ++#endif ++ + skip_element: + virtqueue_push(vq, elem, 0); + virtio_notify(vdev, vq); +@@ -923,6 +1107,9 @@ static void virtio_balloon_device_unrealize(DeviceState *dev) + virtio_delete_queue(s->reporting_vq); + } + virtio_cleanup(vdev); ++#ifdef CONFIG_HUGEPAGE_POD ++ free_gbp(); ++#endif + } + + static void virtio_balloon_device_reset(VirtIODevice *vdev) +@@ -940,6 +1127,9 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev) + } + + s->poison_val = 0; ++#ifdef CONFIG_HUGEPAGE_POD ++ set_guest_enabled_fpr(false); ++#endif + } + + static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status) +diff --git a/include/exec/memory.h b/include/exec/memory.h +index 51fe10d4a0..c5edf864e1 100644 +--- a/include/exec/memory.h ++++ 
b/include/exec/memory.h +@@ -2109,6 +2109,17 @@ static inline bool memory_region_is_nonvolatile(MemoryRegion *mr) + return mr->nonvolatile; + } + ++#ifdef CONFIG_HUGEPAGE_POD ++/** ++ * memory_region_is_huge_pod: check whether a memory region is POD hugepage ++ * ++ * Returns %true if a memory region is POD hugepage. ++ * ++ * @mr: the memory region being queried ++ */ ++bool memory_region_is_huge_pod(MemoryRegion *mr); ++#endif ++ + /** + * memory_region_get_fd: Get a file descriptor backing a RAM memory region. + * +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 7602cd4429..de68df91a3 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -606,6 +606,11 @@ int kvm_create_shadow_device(PCIDevice *dev); + int kvm_delete_shadow_device(PCIDevice *dev); + #endif + ++#ifdef CONFIG_HUGEPAGE_POD ++int kvm_update_touched_log(void); ++int kvm_clear_slot_dirty_bitmap(void *ram); ++#endif ++ + void kvm_mark_guest_state_protected(void); + + #endif +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 96bc60475e..422a811f7e 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -104,6 +104,9 @@ struct kvm_userspace_memory_region { + */ + #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) + #define KVM_MEM_READONLY (1UL << 1) ++#ifdef CONFIG_HUGEPAGE_POD ++#define KVM_MEM_HUGE_POD (1UL << 9) ++#endif + + /* for KVM_IRQ_LINE */ + struct kvm_irq_level { +@@ -1785,6 +1788,9 @@ struct kvm_enc_region { + /* Available with KVM_CAP_ARM_SVE */ + #define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int) + ++/* Available always */ ++#define KVM_POD_TOUCHED_LOG _IO(KVMIO, 0xfe) ++ + /* Available with KVM_CAP_S390_VCPU_RESETS */ + #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) + #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) +diff --git a/meson.build b/meson.build +index 50b1e31edf..d379a71927 100644 +--- a/meson.build ++++ b/meson.build +@@ -573,6 +573,13 @@ have_mbind_proportion = get_option('mbind_by_proportion') \ + + config_host_data.set('CONFIG_MBIND_PROPORTION', have_mbind_proportion) + ++# hugepage pod ++have_hugepage_pod = get_option('hugepage_pod') \ ++ .require(targetos == 'linux', error_message: 'hugepage_pod is supported only on Linux') \ ++ .allowed() ++ ++config_host_data.set('CONFIG_HUGEPAGE_POD', have_hugepage_pod) ++ + # vhost + have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(targetos != 'linux') \ +@@ -4495,6 +4502,7 @@ summary_info += {'FUSE lseek': fuse_lseek.found()} + summary_info += {'selinux': selinux} + summary_info += {'libdw': libdw} + summary_info += {'mbind proportion': have_mbind_proportion} ++summary_info += {'hugepage pod': have_hugepage_pod} + summary(summary_info, bool_yn: true, section: 'Dependencies') + + if host_arch == 'unknown' +diff --git a/meson_options.txt b/meson_options.txt +index 94a9b479bd..f446612ff6 100644 +--- a/meson_options.txt ++++ b/meson_options.txt +@@ -377,3 +377,6 @@ option('hexagon_idef_parser', type : 'boolean', value : true, + + option('mbind_by_proportion', type: 'feature', value: 'auto', + description: ' support of one guest numa node alloc memory from multi host nodes') ++ ++option('hugepage_pod', type: 'feature', value: 'auto', ++ description: ' support of hugepage use on demand') +diff --git a/migration/migration.c b/migration/migration.c +index eba3f9d17d..91b2267c3f 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -67,6 +67,10 @@ + #include "options.h" + #include "sysemu/dirtylimit.h" + #include "qemu/sockets.h" ++#ifdef CONFIG_HUGEPAGE_POD ++#include 
"qemu/log-for-trace.h" ++#include "sysemu/kvm.h" ++#endif + + #define DEFAULT_FD_MAX 4096 + +@@ -3777,3 +3781,33 @@ static void register_migration_types(void) + } + + type_init(register_migration_types); ++ ++#ifdef CONFIG_HUGEPAGE_POD ++#define TOUCHED_LOG_TRY_TIME_MAX 3 ++int ram_init_touched_log(void) ++{ ++ int ret; ++ int try_times = 0; ++ ++ qemu_log("start init touched log\n"); ++ while(try_times < TOUCHED_LOG_TRY_TIME_MAX) { ++ ret = kvm_update_touched_log(); ++ if (!ret) { ++ qemu_log("end init touched log\n"); ++ return ret; ++ } ++ if (ret == -EINTR) { ++ try_times++; ++ continue; ++ } ++ if (ret) { ++ if (ret == -ENOSYS) { ++ qemu_log("kvm not support touched log\n"); ++ } ++ qemu_log("touched log failed (%d)\n", ret); ++ return ret; ++ } ++ } ++ return -EINTR; ++} ++#endif +diff --git a/migration/migration.h b/migration/migration.h +index eeddb7c0bd..66fe4dd799 100644 +--- a/migration/migration.h ++++ b/migration/migration.h +@@ -555,4 +555,8 @@ void migrate_fd_cancel(MigrationState *s); + + bool memcrypt_enabled(void); + ++#ifdef CONFIG_HUGEPAGE_POD ++int ram_init_touched_log(void); ++#endif ++ + #endif +diff --git a/migration/ram.c b/migration/ram.c +index 028b1ebb6e..b46de7cd6d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -77,6 +77,10 @@ + #include "qemu/userfaultfd.h" + #endif /* defined(__linux__) */ + ++#ifdef CONFIG_HUGEPAGE_POD ++#include "sysemu/kvm.h" ++#endif ++ + /***********************************************************/ + /* ram save/restore */ + +@@ -3255,6 +3259,10 @@ static void ram_init_bitmaps(RAMState *rs) + migration_bitmap_clear_discarded_pages(rs); + } + ++#ifdef CONFIG_HUGEPAGE_POD ++static int ram_init_touched_bitmap(RAMState *rs); ++#endif ++ + static int ram_init_all(RAMState **rsp) + { + if (ram_state_init(rsp)) { +@@ -3267,6 +3275,11 @@ static int ram_init_all(RAMState **rsp) + } + + ram_init_bitmaps(*rsp); ++#ifdef CONFIG_HUGEPAGE_POD ++ if (ram_init_touched_bitmap(*rsp)) { ++ return -1; ++ } ++#endif + + return 0; + } +@@ -4794,3 +4807,54 @@ void ram_mig_init(void) + register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state); + ram_block_notifier_add(&ram_mig_ram_notifier); + } ++ ++#ifdef CONFIG_HUGEPAGE_POD ++static int ram_init_touched_bitmap(RAMState *rs) ++{ ++ RAMBlock *block; ++ bool has_pod = false; ++ ++ qemu_mutex_lock_ramlist(); ++ rcu_read_lock(); ++ RAMBLOCK_FOREACH_NOT_IGNORED(block) { ++ if (!memory_region_is_huge_pod(block->mr)) { ++ continue; ++ } ++ ++ kvm_clear_slot_dirty_bitmap(block->host); ++ has_pod = true; ++ } ++ rcu_read_unlock(); ++ qemu_mutex_unlock_ramlist(); ++ ++ if (!has_pod) { ++ return 0; ++ } ++ ++ if (ram_init_touched_log()) { ++ error_report("POD: Init touched log failed\n"); ++ return -1; ++ } ++ ++ info_report("Start update touched log bitmaps\n"); ++ qemu_mutex_lock_ramlist(); ++ rcu_read_lock(); ++ RAMBLOCK_FOREACH_NOT_IGNORED(block) { ++ if (!memory_region_is_huge_pod(block->mr)) { ++ continue; ++ } ++ ++ ram_state->migration_dirty_pages -= ++ bitmap_count_one_with_offset(block->bmap, 0, ++ block->used_length >> TARGET_PAGE_BITS); ++ bitmap_clear(block->bmap, 0, block->used_length >> TARGET_PAGE_BITS); ++ } ++ migration_bitmap_sync_precopy(rs, false); ++ rcu_read_unlock(); ++ qemu_mutex_unlock_ramlist(); ++ ++ info_report("End update touched log bitmaps, touched pages %lu\n", ++ (unsigned long)ram_state->migration_dirty_pages); ++ return 0; ++} ++#endif +diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh +index d5d9130540..06f4f803c9 100644 +--- 
a/scripts/meson-buildoptions.sh ++++ b/scripts/meson-buildoptions.sh +@@ -228,6 +228,7 @@ meson_options_help() { + printf "%s\n" ' mbind-by-proportion' + printf "%s\n" ' support of one guest numa node alloc memory from multi' + printf "%s\n" ' host nodes' ++ printf "%s\n" ' hugepage-pod support of hugepage use on demand' + } + _meson_option_parse() { + case $1 in +@@ -576,6 +577,8 @@ _meson_option_parse() { + --disable-uadk) printf "%s" -Duadk=disabled ;; + --enable-mbind-by-proportion) printf "%s" -Dmbind_by_proportion=enabled ;; + --disable-mbind-by-proportion) printf "%s" -Dmbind_by_proportion=disabled ;; ++ --enable-hugepage-pod) printf "%s" -Dhugepage_pod=enabled ;; ++ --disable-hugepage-pod) printf "%s" -Dhugepage_pod=disabled ;; + *) return 1 ;; + esac + } +diff --git a/system/memory.c b/system/memory.c +index fa99009701..bf331d0e7b 100644 +--- a/system/memory.c ++++ b/system/memory.c +@@ -3786,3 +3786,28 @@ static void memory_register_types(void) + } + + type_init(memory_register_types) ++ ++#ifdef CONFIG_HUGEPAGE_POD ++#define HUGEPAGESIZE (1 << 21) ++bool memory_region_is_huge_pod(MemoryRegion *mr) ++{ ++ HostMemoryBackend *backend; ++ ++ rcu_read_lock(); ++ while (mr->alias) { ++ mr = mr->alias; ++ } ++ backend = (HostMemoryBackend *)object_dynamic_cast(mr->owner, TYPE_MEMORY_BACKEND); ++ rcu_read_unlock(); ++ ++ if (backend == NULL || backend->prealloc) { ++ return false; ++ } ++ ++ if (host_memory_backend_pagesize(backend) != HUGEPAGESIZE) { ++ return false; ++ } ++ ++ return true; ++} ++#endif +-- +2.33.0 + diff --git a/ub-acpi-introduce-acpi-header.patch b/ub-acpi-introduce-acpi-header.patch new file mode 100644 index 0000000000000000000000000000000000000000..5ac17092a6c1ea58655f96be522a835ffe30351f --- /dev/null +++ b/ub-acpi-introduce-acpi-header.patch @@ -0,0 +1,255 @@ +From 527fdbe72778f45a0bd09ae0c2672d3054ac4800 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 11:16:02 +0800 +Subject: [PATCH 2/7] ub acpi: introduce acpi header + +introduce acpi header, this prepare for later ub acpi report + +Signed-off-by: caojinhuahw +--- + hw/ub/meson.build | 1 + + hw/ub/ub_acpi.c | 36 +++++++++ + include/hw/ub/ub_acpi.h | 175 ++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 212 insertions(+) + create mode 100644 hw/ub/ub_acpi.c + create mode 100644 include/hw/ub/ub_acpi.h + +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index 39fd4b7c77..e1146704e6 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -2,6 +2,7 @@ ub_ss = ss.source_set() + ub_ss.add(files( + 'ub.c', + 'ub_ubc.c', ++ 'ub_acpi.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) + subdir('hisi') +diff --git a/hw/ub/ub_acpi.c b/hw/ub/ub_acpi.c +new file mode 100644 +index 0000000000..cd3c22b00f +--- /dev/null ++++ b/hw/ub/ub_acpi.c +@@ -0,0 +1,36 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#include "qemu/osdep.h" ++#include "qemu/module.h" ++#include "qemu/cutils.h" ++#include "qemu/units.h" ++#include "hw/arm/virt.h" ++#include "hw/boards.h" ++#include "hw/qdev-properties.h" ++#include "hw/qdev-properties-system.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_acpi.h" ++#include "qemu/log.h" ++#include "migration/vmstate.h" ++#include "qapi/error.h" ++#include "qapi/util.h" ++#include "qapi/qmp/qstring.h" ++#include "hw/ub/hisi/ub_fm.h" ++#include "hw/acpi/aml-build.h" ++ +diff --git a/include/hw/ub/ub_acpi.h b/include/hw/ub/ub_acpi.h +new file mode 100644 +index 0000000000..d3af1c78bd +--- /dev/null ++++ b/include/hw/ub/ub_acpi.h +@@ -0,0 +1,175 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef HW_UB_ACPI_H ++#define HW_UB_ACPI_H ++#include "hw/arm/virt.h" ++#include "hw/acpi/acpi-defs.h" ++#include "hw/acpi/bios-linker-loader.h" ++#include "hw/acpi/aml-build.h" ++#include "hw/acpi/utils.h" ++#include "hw/ub/ub.h" ++ ++#define DTS_TABLE_HEADER_RESERVE_LEN 3 ++#define DTS_ROOT_TABLE_RESERVE_LEN 6 ++#define DTS_TABLE_HEADER_NAME_LEN 16 ++typedef struct DtsTableHeader { ++ char name[DTS_TABLE_HEADER_NAME_LEN]; ++ uint32_t total_size; ++ uint8_t version; ++ uint8_t reserved[DTS_TABLE_HEADER_RESERVE_LEN]; ++ uint32_t remain_size; ++ uint32_t checksum; ++} DtsTableHeader; ++ ++/* DTS UBIOS INFO TABLE */ ++typedef struct DtsRootTable { ++ DtsTableHeader header; ++ uint16_t count; ++ uint8_t reserved[DTS_ROOT_TABLE_RESERVE_LEN]; ++ uint64_t tables[3]; ++} DtsRootTable; ++ ++#define UBC_QUEUE_INTERRUPT_DEFAULT 443 ++ ++#define UBC_VENDOR_INFO_LEN 256 ++/* ub controller block */ ++typedef struct UbcNode { ++ uint32_t interrupt_id_start; ++ uint32_t interrupt_id_end; ++ uint64_t gpa_base; ++ uint64_t gpa_size; ++ uint8_t memory_size_limit; ++ uint8_t dma_cca; /* 0: DMA(y) CCA(N) ; 1: DMA(Y) CCA(Y); other: DMA(N) */ ++ uint16_t ummu_mapping; ++ uint16_t proximity_domain; ++ uint8_t reserved1[2]; ++ uint64_t msg_queue_base; ++ uint64_t msg_queue_size; ++ uint16_t msg_queue_depth; ++ uint16_t msg_queue_interrupt; ++ uint8_t msg_queue_interrupt_attr; ++ uint8_t reserved2[59]; ++ UbGuid ubc_info; /* UB controller's GUID */ ++ uint8_t vendor_info[UBC_VENDOR_INFO_LEN]; /* vendor private info */ ++} UbcNode; ++ ++#define UMMU_VEND_LEN 80 ++typedef struct UmmuNode { ++ uint64_t base_addr; ++ uint64_t addr_size; ++ uint32_t interrupt_id; ++ uint16_t proximity_domain; ++ uint16_t its_index; ++ uint64_t pmu_addr; ++ uint64_t pmu_size; ++ uint32_t pmu_interrupt_id; ++ uint32_t min_tid; ++ uint32_t max_tid; ++ uint8_t reserved2[26]; ++ uint16_t vender_id; ++ uint8_t vender_info[UMMU_VEND_LEN]; ++} UmmuNode; ++ ++/* UMMU table */ ++typedef struct DtsSubUmmuTable { ++ DtsTableHeader header; ++ uint32_t count; ++ uint32_t flag; ++ UmmuNode node[0]; ++} 
DtsSubUmmuTable; ++ ++#define LOCAL_CNA_START 1 ++#define LOCAL_CNA_END 65535 ++#define LOCAL_EID_START 1 ++#define LOCAL_EID_END 65535 ++ ++/* UB Controller table */ ++typedef struct DtsSubUbcTable { ++ DtsTableHeader header; ++ uint32_t local_cna_start; ++ uint32_t local_cna_end; ++ uint32_t local_eid_start; ++ uint32_t local_eid_end; ++ uint8_t feature_set; ++ uint8_t reserved[3]; ++ uint16_t cluster_mode; ++ uint16_t ubc_count; ++ UbcNode node[0]; ++} DtsSubUbcTable; ++ ++typedef struct MemRange { ++ uint8_t flags; ++ uint8_t reserved[7]; ++ uint64_t base; ++ uint64_t size; ++} MemRange; ++/* UB Reserved Memory table */ ++typedef struct DtsRsvMemTable { ++ DtsTableHeader header; ++ uint16_t count; ++ uint8_t reserved[6]; ++ MemRange node[0]; ++} DtsRsvMemTable; ++ ++/* UBRT subtable */ ++typedef struct UbrtSubtable { ++ uint8_t type; ++ uint8_t reserved[7]; ++ uint64_t pointer; ++} UbrtSubtable; ++ ++typedef struct acpi_table_header { ++ char signature[4]; ++ uint32_t length; ++ uint8_t revision; ++ uint8_t checksum; ++ char oem_id[6]; ++ char oem_table_id[8]; ++ uint32_t oem_revision; ++ char asl_compiler_id[4]; ++ uint32_t asl_compiler_revision; ++} ACPI_TABLE_HEADER; ++ ++/* UBRT table */ ++typedef struct AcpiUbrtTable { ++ ACPI_TABLE_HEADER header; ++ uint32_t count; ++ UbrtSubtable subtables[]; ++} AcpiUbrtTable; ++#define ACPI_UB_TABLE_TYPE_BUS_CONTROLLER 0 ++#define ACPI_UB_TABLE_TYPE_UMMU 1 ++#define ACPI_UB_TABLE_TYPE_RSV_MEM 2 ++#define ACPI_UB_TABLE_TYPE_VIRTUAL_BUS 3 ++#define ACPI_UB_TABLE_TYPE_CALL_ID_SERVICE 4 ++#define ACPI_UB_TABLE_TYPE_DEVICE 5 ++#define ACPI_UB_TABLE_TYPE_TOPOLOGY 6 ++ ++#define UBIOS_UBC_TABLE_CNT 1 ++#define UBIOS_UMMU_TABLE_CNT 1 ++#define UBIOS_MMIOS_SIZE_PER_UBC (512 * GiB) ++#define UBIOS_INFO_TABLE_SIZE (sizeof(DtsRootTable)) ++#define UBIOS_UBC_TABLE_SIZE(cnt) (sizeof(DtsSubUbcTable) + (cnt) * sizeof(UbcNode)) ++#define UBIOS_UMMU_TABLE_SIZE(cnt) (sizeof(DtsSubUmmuTable) + (cnt) * sizeof(UmmuNode)) ++#define UBIOS_RSV_MEM_TABLE_SIZE(cnt) (sizeof(DtsRsvMemTable) + (cnt) * sizeof(MemRange)) ++ ++#define UBIOS_TABLE_SIZE (UBIOS_INFO_TABLE_SIZE + \ ++ UBIOS_UBC_TABLE_SIZE(UBIOS_UBC_TABLE_CNT) + \ ++ UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT) + \ ++ UBIOS_RSV_MEM_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT)) ++ ++#endif +\ No newline at end of file +-- +2.33.0 + diff --git a/ub-acpi-introduce-ub-mem-header-file.patch b/ub-acpi-introduce-ub-mem-header-file.patch new file mode 100644 index 0000000000000000000000000000000000000000..499c3d9b67371b247c138ed3c97642fecb5de036 --- /dev/null +++ b/ub-acpi-introduce-ub-mem-header-file.patch @@ -0,0 +1,194 @@ +From f4984d14ae507e8f29427037ae11d6539651cd81 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 11:31:03 +0800 +Subject: [PATCH 3/7] ub acpi: introduce ub mem header file + +introduce ub mem header file, this prepare for later ub acpi report + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_acpi.c | 1 + + include/hw/ub/hisi/ub_mem.h | 159 ++++++++++++++++++++++++++++++++++++ + 2 files changed, 160 insertions(+) + create mode 100644 include/hw/ub/hisi/ub_mem.h + +diff --git a/hw/ub/ub_acpi.c b/hw/ub/ub_acpi.c +index cd3c22b00f..9b3af82203 100644 +--- a/hw/ub/ub_acpi.c ++++ b/hw/ub/ub_acpi.c +@@ -31,6 +31,7 @@ + #include "qapi/error.h" + #include "qapi/util.h" + #include "qapi/qmp/qstring.h" ++#include "hw/ub/hisi/ub_mem.h" + #include "hw/ub/hisi/ub_fm.h" + #include "hw/acpi/aml-build.h" + +diff --git a/include/hw/ub/hisi/ub_mem.h b/include/hw/ub/hisi/ub_mem.h +new file mode 100644 +index 
0000000000..aaf30322c5 +--- /dev/null ++++ b/include/hw/ub/hisi/ub_mem.h +@@ -0,0 +1,159 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef HW_UB_MEM_H ++#define HW_UB_MEM_H ++ ++/* ++ * ++ * +---------------------------------------------------------+ ++ * | CPU Die | ++ * | +-------------+-------------+---------------+ | ++ * | | A | B | C | AddrSpace| ++ * |__+-/-----------+-----------\-+--------------\+ _________| ++ * / \ \ ++ * +-----------------/---------------------------\----------------\-------------+ ++ * | +-------------/ +-----------+ +-----------+ \------------+ +-\----------+ | ++ * | | MAR0(Master)| |MAR0(Slave)| |MAR1(Slave)| |MAR1(Master)| |MAR2(Master)| | ++ * | +-------------+ +-----------+ +-----------+ +------------+ +------------+ | ++ * | +---------------------------+ +--------------------------+ +------------+ | ++ * | | MAR0 | | MAR1 | | MAR2 | | ++ * | | +-----------+ +---------+ | | +---------+ +----------+ | | +--------+ | | ++ * | | | NL0 | | NL1 | | | | NL2 | | NL3 | | | | NL4 | | | ++ * | | +-----------+ +---------+ | | +---------+ +----------+ | | +--------+ | | ++ * | | +----+ +----+ +---+ +---+ | | +---+ +---+ +---+ +----+ | | +--------+ | | ++ * | | | | | | | | | | | | | | | | | | | | | | | | | | ++ * | | +----+ +----+ +---+ +---+ | | +---+ +---+ +---+ +----+ | | +--------+ | | ++ * +----------------------------------------------------------------------------+ ++*/ ++#define MAR_NUM_ONE_UDIE 5 ++#define DECODER0_MARID 0 ++#define DECODER_SLAVE_MARID0 1 ++#define DECODER_SLAVE_MARID1 2 ++#define DECODER1_MARID 3 ++#define DECODER2_MARID 4 ++#define MB_SIZE_OFFSET 20 ++#define UB_MEM_MAR0_SPACE_SIZE (512 * GiB) ++#define UB_MEM_MAR1_SPACE_SIZE (0) ++#define UB_MEM_MAR2_SPACE_SIZE (0) ++#define UB_MEM_MAR3_SPACE_SIZE (512 * GiB) ++#define UB_MEM_MAR4_SPACE_SIZE (1024 * GiB) ++#define UB_MEM_SPACE_SIZE (UB_MEM_MAR0_SPACE_SIZE + \ ++ UB_MEM_MAR1_SPACE_SIZE + \ ++ UB_MEM_MAR2_SPACE_SIZE + \ ++ UB_MEM_MAR3_SPACE_SIZE + \ ++ UB_MEM_MAR4_SPACE_SIZE) ++#define UB_MEM_REG_SHIFT 16 ++#define UMMU_EXT_REG_SIZE 0x100 ++#define UB_MEM_VALID_VALUE 0 ++#define UB_MEM_VALID_MASK GENMASK_ULL(2, 0) ++#define UB_MEM_REG_BASE 0x800000 ++#define UMMU_MEM_START_ADDR 0x0 ++#define START_PTE_ADDR_MASK GENMASK(26, 0) ++#define START_ATE_ADDR_MASK GENMASK(22, 0) ++#define UMMU_MEM_LEN_GRANU 0x4 ++#define MEM_GRANU_MASK GENMASK(19, 17) ++#define MEM_GRANU_SHIFT 17 ++#define MEM_LEN_MASK GENMASK(16, 0) ++#define UMMU_MEM_BTE 0x8 ++#define MEM_BTE_MASK GENMASK(16, 0) ++#define UMMU_MEM_INDEX 0xC ++#define MEM_INDEX_RSV_MASK GENMASK(31, 20) ++#define MEM_WR_MASK (1UL << 19) ++#define MEM_TYPE_MASK (1UL << 18) ++#define MEM_VLD_MASK (1UL << 17) ++#define MEM_PTE_INDEX_MASK GENMASK(9, 0) ++#define MEM_ATE_INDEX_MASK GENMASK(16, 0) ++#define UMMU_MEM_DTLB_INVLD 0x10 ++#define MEM_DTLB_INVLD_MASK (1UL) 
++typedef struct UbMemMmuInfo { ++ /* valid bits ++ * bit0: protection_table_bits ++ * bit1: translation_table_bits ++ * bit2: ummu_reg_addr_bits ++ * other reserved ++ */ ++ uint64_t valid_bits; ++ uint32_t protection_table_bits; ++ uint32_t translation_table_bits; ++ uint64_t ext_reg_base; ++ uint64_t ext_reg_size; ++ uint8_t reserved[48]; ++} UbMemMmuInfo; ++ ++typedef struct UbMemDecoderInfo { ++ uint64_t decode_addr; ++ uint32_t cc_base_addr; ++ uint32_t cc_base_size; ++ uint32_t nc_base_addr; ++ uint32_t nc_base_size; ++} UbMemDecoderInfo; ++ ++typedef struct UbcVendorInfo { ++ uint32_t ub_mem_ver; ++ uint8_t max_addr_bits; ++ uint8_t reserved1[3]; ++ UbMemDecoderInfo mem_info[MAR_NUM_ONE_UDIE]; ++ uint64_t cmd_queue_base; /* IO Decoder CMD queue */ ++ uint64_t event_queue_base; /* IO Decoder Event queue */ ++ uint8_t vendor_feature_sets; /* bit0: management plane deployment 0(enable) 1(disable) */ ++ uint8_t reserved2[111]; ++} UbcVendorInfo; ++ ++/* hisi memory */ ++typedef struct UbMemBlockHw { ++ /* DW0 */ ++ uint32_t valid : 1; ++ uint32_t mem_base : 9; ++ uint32_t mem_limit : 9; ++ uint32_t one_path : 1; ++ uint32_t wr_delay_comp : 1; ++ uint32_t reduce_delay_comp : 1; ++ uint32_t cmo_delay_comp : 1; ++ uint32_t so : 1; ++ uint32_t lb_0 : 8; ++ /* DW1 */ ++ uint32_t token_id0 : 20; ++ uint32_t dcna0_l : 12; ++ /* DW2 */ ++ uint32_t dcna0_h : 4; ++ uint32_t uba_base0_l : 28; ++ /* DW3 */ ++ uint32_t uba_base0_h : 15; ++ uint32_t pa_0 : 1; ++ uint32_t rsv : 16; ++} UbMemBlockHw; ++ ++#define MAX_BLOCKS 4 ++typedef struct UbMemPageEntry { ++ UbMemBlockHw blocks[MAX_BLOCKS]; ++} UbMemPageEntry; ++ ++typedef enum UbMemEntryGranule { ++ GRANULE_1GB = 0, ++ GRANULE_2GB = 1, ++ GRANULE_4GB = 2, ++ GRANULE_8GB = 3, ++ GRANULE_16GB = 4, ++ GRANULE_32GB = 5, ++ GRANULE_64GB = 6, ++ GRANULE_128GB = 7, ++ GRANULE_256GB = 8, ++ GRANULE_512GB = 9, ++ GRANULE_1TB = 10, ++} UbMemEntryGranule; ++#endif +-- +2.33.0 + diff --git a/ub-adapter-acpi-change-for-last-commit.patch b/ub-adapter-acpi-change-for-last-commit.patch new file mode 100644 index 0000000000000000000000000000000000000000..8f90c6ecab5e8fd0765eff46d0f2f5d0eaf1948f --- /dev/null +++ b/ub-adapter-acpi-change-for-last-commit.patch @@ -0,0 +1,94 @@ +From 440b5d4816fd866642dcb1dbb5357dbe42bec197 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 18 Nov 2025 16:50:57 +0800 +Subject: [PATCH 7/7] ub: adapter acpi change for last commit + +acpi table changed last commit, adapter bios tests + +Signed-off-by: caojinhuahw +--- + tests/data/acpi/virt/DSDT | Bin 5814 -> 5898 bytes + tests/data/acpi/virt/DSDT.acpihmatvirt | Bin 7323 -> 7407 bytes + tests/data/acpi/virt/DSDT.memhp | Bin 7175 -> 7259 bytes + tests/data/acpi/virt/DSDT.pxb | Bin 8297 -> 8381 bytes + tests/data/acpi/virt/DSDT.topology | Bin 9335 -> 9419 bytes + tests/data/acpi/virt/IORT | Bin 128 -> 320 bytes + tests/data/acpi/virt/UBRT | Bin 0 -> 88 bytes + 7 files changed, 0 insertions(+), 0 deletions(-) + create mode 100644 tests/data/acpi/virt/UBRT + +diff --git a/tests/data/acpi/virt/DSDT b/tests/data/acpi/virt/DSDT +index 404bc5ac2188d885e28b6c80d499193865cf1c9c..afa7f3b07da5ce6be2894bc70e3de114523591b2 100644 +GIT binary patch +delta 110 +zcmdm{+oi|l66_MfCCt<8 + +delta 25 +gcmeCu+osFq66_MPO^kto$$cW1G*e^dM)iI%09^YA2><{9 + +diff --git a/tests/data/acpi/virt/DSDT.acpihmatvirt b/tests/data/acpi/virt/DSDT.acpihmatvirt +index 5f9c0b2d3cdc55949c32d564c92309aa54529d8d..4500a1699b7f4e19b9dabd9d00d23029a793074d 100644 +GIT binary patch +delta 110 +zcmbPj`QDPtCDLZX|bLY#J&003)N2nqlI + 
+diff --git a/tests/data/acpi/virt/DSDT.memhp b/tests/data/acpi/virt/DSDT.memhp +index d85565e3b7801de346e302e6e88bc76d88f33f27..abaa7a991b065b8d708dd37fbb37eccac509108e 100644 +GIT binary patch +delta 110 +zcmZp-xNX7Z66_KZEyKXT6gZJfnyKyZM)e2MA<<1zp-#>Q9Pu8WF1#L|!JY=DCWZ_g +Y@u8kB3?Mn*P!u^+B)I@IIdhmC0BLv|)c^nh + +delta 25 +gcmca@(Qd)z66_MfF2lgU)H;z%nyG2uM)e2M09^M6fdBvi + +diff --git a/tests/data/acpi/virt/DSDT.pxb b/tests/data/acpi/virt/DSDT.pxb +index ccb43ab242521cdfc80f6d6b170d2e0818186632..430e9bde9817dfd8fc92b1fcd245c88584b82330 100644 +GIT binary patch +delta 110 +zcmaFqu-B2xCDQ9Pu8WF1#L|!JY=DCWZ_g +Y@u8kB3?Mn*P!u^+B)I@IIdhmC05$&`AOHXW + +delta 25 +gcmdn%_|k#PCDjp_j^A<<1zp-#>Q9Pu8WF1#L|!JY=DCWZ_g +Y@u8kB3?Mn*P!u^+B)I@IIdhmC0BK(vr~m)} + +delta 25 +gcmX@@`Q3xdCD*Vk35v<@85#X!<1VAAM5F13Z0~o33QiFL&I&-l2owUbK{PW+9ts?QG$Rub!vsKVAO<1`04fIY +E0r;T@0RR91 + +literal 0 +HcmV?d00001 + +-- +2.33.0 + diff --git a/ub-add-base-ubbus-framework.patch b/ub-add-base-ubbus-framework.patch new file mode 100644 index 0000000000000000000000000000000000000000..6b38c6c18796bf958c1842d33fd68bcff7e92380 --- /dev/null +++ b/ub-add-base-ubbus-framework.patch @@ -0,0 +1,190 @@ +From 36b23f639e30184c36f817614c6be42520080d45 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 10:27:04 +0800 +Subject: [PATCH 1/7] ub: add base ubbus framework + +add base ubbus framework, function detail will be realize later + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 89 ++++++++++++++++++++++++++++++++++++++++++ + hw/ub/ub_ubc.c | 3 ++ + include/hw/ub/ub_bus.h | 8 ++++ + include/hw/ub/ub_ubc.h | 4 ++ + 4 files changed, 104 insertions(+) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 0c494fc9f9..8d94601dbb 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -28,6 +28,94 @@ + #include "hw/ub/ub_ubc.h" + #include "qemu/log.h" + #include "qapi/error.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "migration/vmstate.h" ++ ++ ++QLIST_HEAD(, BusControllerState) ub_bus_controllers; ++ ++static void ubbus_dev_print(Monitor *mon, DeviceState *dev, int indent) ++{ ++} ++ ++static char *ubbus_get_dev_path(DeviceState *dev) ++{ ++ return NULL; ++} ++ ++static char *ubbus_get_fw_dev_path(DeviceState *dev) ++{ ++ return NULL; ++} ++ ++static const VMStateDescription vmstate_ubbus = { ++ .name = TYPE_UB_BUS, ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .fields = (VMStateField[]) { ++ VMSTATE_END_OF_LIST() ++ } ++}; ++ ++static void ub_bus_realize(BusState *qbus, Error **errp) ++{ ++ UBBus *bus = UB_BUS(qbus); ++ ++ vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_ubbus, bus); ++} ++ ++void ub_save_ubc_list(BusControllerState *s) ++{ ++ QLIST_INSERT_HEAD(&ub_bus_controllers, s, node); ++} ++ ++static void ub_bus_unrealize(BusState *qbus) ++{ ++ UBBus *bus = UB_BUS(qbus); ++ ++ vmstate_unregister(NULL, &vmstate_ubbus, bus); ++} ++ ++static void ubbus_reset(BusState *qbus) ++{ ++} ++ ++UBBus *ub_register_root_bus(DeviceState *parent, const char *name, ++ MemoryRegion *io_mmio) ++{ ++ UBBus *bus; ++ ++ bus = UB_BUS(qbus_new(TYPE_UB_BUS, parent, name)); ++ bus->address_space_mem = io_mmio; ++ ++ return bus; ++} ++ ++void ub_unregister_root_bus(UBBus *bus) ++{ ++ qbus_unrealize(BUS(bus)); ++} ++ ++static void ub_bus_class_init(ObjectClass *klass, void *data) ++{ ++ BusClass *k = BUS_CLASS(klass); ++ ++ k->print_dev = ubbus_dev_print; ++ k->get_dev_path = ubbus_get_dev_path; ++ k->get_fw_dev_path = ubbus_get_fw_dev_path; ++ k->realize = ub_bus_realize; ++ k->unrealize = ub_bus_unrealize; ++ k->reset = 
ubbus_reset; ++} ++ ++static const TypeInfo ub_bus_info = { ++ .name = TYPE_UB_BUS, ++ .parent = TYPE_BUS, ++ .instance_size = sizeof(UBBus), ++ .class_size = sizeof(UBBusClass), ++ .class_init = ub_bus_class_init, ++}; + + static UBDevice *do_ub_register_device(UBDevice *ub_dev, const char *name, Error **errp) + { +@@ -101,6 +189,7 @@ static const TypeInfo ub_device_type_info = { + + static void ub_register_types(void) + { ++ type_register_static(&ub_bus_info); + type_register_static(&ub_device_type_info); + } + +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index a0bc907ec8..3e634ab83a 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -123,6 +123,8 @@ static void ub_bus_controller_realize(DeviceState *dev, Error **errp) + memory_region_init(&s->io_mmio, OBJECT(s), "UB_MMIO", UINT64_MAX); + sysbus_init_mmio(sysdev, &s->io_mmio); + ++ s->bus = ub_register_root_bus(dev, name, &s->io_mmio); ++ ub_save_ubc_list(s); + g_free(name); + } + +@@ -132,6 +134,7 @@ static void ub_bus_controller_unrealize(DeviceState *dev) + SysBusDevice *sysdev = SYS_BUS_DEVICE(dev); + g_free(sysdev->parent_obj.id); + QLIST_REMOVE(s, node); ++ ub_unregister_root_bus(s->bus); + ub_reg_free(dev); + } + +diff --git a/include/hw/ub/ub_bus.h b/include/hw/ub/ub_bus.h +index ef78305cb0..4fbc9407d5 100644 +--- a/include/hw/ub/ub_bus.h ++++ b/include/hw/ub/ub_bus.h +@@ -36,4 +36,12 @@ struct UBBus { + #define TYPE_UB_BUS "UB_BUS" + OBJECT_DECLARE_TYPE(UBBus, UBBusClass, UB_BUS) + ++UBBus *ub_register_root_bus(DeviceState *parent, const char *name, ++ MemoryRegion *io_mmio); ++void ub_unregister_root_bus(UBBus *bus); ++UBDevice *ub_find_device_by_eid(UBBus *bus, uint32_t eid); ++static inline UBBus *ub_get_bus(const UBDevice *dev) ++{ ++ return UB_BUS(qdev_get_parent_bus(DEVICE(dev))); ++} + #endif +diff --git a/include/hw/ub/ub_ubc.h b/include/hw/ub/ub_ubc.h +index 5e791fbcf3..5d9098a4c3 100644 +--- a/include/hw/ub/ub_ubc.h ++++ b/include/hw/ub/ub_ubc.h +@@ -20,6 +20,8 @@ + + #include "hw/sysbus.h" + #include "qom/object.h" ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/ub_bus.h" + + #define TYPE_BUS_CONTROLLER "ub-bus-controller" + OBJECT_DECLARE_TYPE(BusControllerState, BusControllerClass, BUS_CONTROLLER) +@@ -37,6 +39,7 @@ struct BusControllerState { + MemoryRegion io_mmio; /* ub mmio hpa memory region */ + uint32_t mmio_size; + bool mig_enabled; ++ UBBus *bus; + QLIST_ENTRY(BusControllerState) node; + }; + +@@ -44,4 +47,5 @@ struct BusControllerClass { + SysBusDeviceClass parent_class; + }; + ++void ub_save_ubc_list(BusControllerState *s); + #endif +-- +2.33.0 + diff --git a/ub-add-base-vfio-ub-device-framework.patch b/ub-add-base-vfio-ub-device-framework.patch new file mode 100644 index 0000000000000000000000000000000000000000..ecc6c22c516726d2d8cb2e0f275c76e08d7eabc3 --- /dev/null +++ b/ub-add-base-vfio-ub-device-framework.patch @@ -0,0 +1,274 @@ +From c48a75a6f3e6e5b14cb185cda30387c560d343dd Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 12:11:06 +0800 +Subject: [PATCH 1/7] ub: add base vfio-ub device framework + +start support vfio-ub device, add based framewrok first, +more will be realized later. 
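A rough usage sketch of where this skeleton is presumably headed, based only on the
properties declared in this patch ("host", "iommufd"); the iommufd object id and the
host address format are placeholders, not something the series defines here:

    -object iommufd,id=iommufd0 \
    -device vfio-ub,host=<ub-device-address>,iommufd=iommufd0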
+ +Signed-off-by: caojinhuahw +--- + hw/vfio/Kconfig | 4 ++ + hw/vfio/meson.build | 1 + + hw/vfio/ub.c | 131 ++++++++++++++++++++++++++++++++++ + hw/vfio/ub.h | 72 +++++++++++++++++++ + include/hw/vfio/vfio-common.h | 1 + + 5 files changed, 209 insertions(+) + create mode 100644 hw/vfio/ub.c + create mode 100644 hw/vfio/ub.h + +diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig +index 5f0d3c2d2b..18fe2a6ed4 100644 +--- a/hw/vfio/Kconfig ++++ b/hw/vfio/Kconfig +@@ -47,3 +47,7 @@ config VFIO_HCT + default y + select VFIO + depends on LINUX && PCI ++ ++config VFIO_UB ++ bool ++ default y if UB +diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build +index bda2688983..39abfaccfb 100644 +--- a/hw/vfio/meson.build ++++ b/hw/vfio/meson.build +@@ -22,5 +22,6 @@ vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c')) + vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c')) + vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c')) + vfio_ss.add(when: 'CONFIG_VFIO_HCT', if_true: files('hct.c')) ++vfio_ss.add(when: 'CONFIG_VFIO_UB', if_true: files('ub.c')) + + specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss) +diff --git a/hw/vfio/ub.c b/hw/vfio/ub.c +new file mode 100644 +index 0000000000..6cc999f0ab +--- /dev/null ++++ b/hw/vfio/ub.c +@@ -0,0 +1,131 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#include "qemu/osdep.h" ++#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ ++#include "qemu/range.h" ++#include ++#include ++#include ++ ++#include "qemu/module.h" ++#include "hw/qdev-properties.h" ++#include "migration/vmstate.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_common.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_config.h" ++#include "hw/ub/ub_acpi.h" ++#include "hw/ub/ub_usi.h" ++#include "hw/ub/ubus_instance.h" ++#include "hw/qdev-properties.h" ++#include "hw/qdev-properties-system.h" ++#include "qemu/log.h" ++#include "ub.h" ++#include "qapi/error.h" ++#include "qemu/error-report.h" ++#include "exec/address-spaces.h" ++#include "sysemu/iommufd.h" ++#include "trace.h" ++ ++static Property vfio_ub_dev_properties[] = { ++ DEFINE_PROP_UB_HOST_DEVADDR("host", VFIOUBDevice, host), ++#ifdef CONFIG_IOMMUFD ++ DEFINE_PROP_LINK("iommufd", VFIOUBDevice, vbasedev.iommufd, ++ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), ++#endif ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++static bool vfio_ub_needed(void *opaque) ++{ ++ return 0; ++} ++ ++static const VMStateDescription vfio_ub_vmstate = { ++ .name = TYPE_VFIO_UB, ++ .unmigratable = 1, ++ .version_id = 0, ++ .minimum_version_id = 0, ++ .needed = vfio_ub_needed, ++ .fields = (VMStateField[]) { ++ VMSTATE_END_OF_LIST() ++ } ++}; ++ ++static void vfio_ub_reset(DeviceState *dev) ++{ ++} ++ ++static void vfio_realize(UBDevice *udev, Error **errp) ++{ ++} ++ ++static void vfio_exitfn(UBDevice *udev) ++{ ++} ++ ++static void vfio_ub_read_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask) ++{ ++} ++ ++static void vfio_ub_write_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask) ++{ ++} ++ ++static void vfio_ub_dev_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ UBDeviceClass *udc = UB_DEVICE_CLASS(klass); ++ ++ dc->reset = vfio_ub_reset; ++ device_class_set_props(dc, vfio_ub_dev_properties); ++ dc->vmsd = &vfio_ub_vmstate; ++ dc->desc = "VFIO-based UB device assignment"; ++ set_bit(DEVICE_CATEGORY_MISC, dc->categories); ++ udc->realize = vfio_realize; ++ udc->exit = vfio_exitfn; ++ udc->config_read = vfio_ub_read_config; ++ udc->config_write = vfio_ub_write_config; ++} ++ ++static void vfio_instance_init(Object *obj) ++{ ++} ++ ++static void vfio_instance_finalize(Object *obj) ++{ ++} ++ ++static const TypeInfo vfio_ub_dev_info = { ++ .name = TYPE_VFIO_UB, ++ .parent = TYPE_UB_DEVICE, ++ .instance_size = sizeof(VFIOUBDevice), ++ .class_init = vfio_ub_dev_class_init, ++ .instance_init = vfio_instance_init, ++ .instance_finalize = vfio_instance_finalize, ++}; ++ ++static void register_vfio_ub_dev_types(void) ++{ ++ type_register_static(&vfio_ub_dev_info); ++} ++ ++type_init(register_vfio_ub_dev_types) +\ No newline at end of file +diff --git a/hw/vfio/ub.h b/hw/vfio/ub.h +new file mode 100644 +index 0000000000..699b36eb49 +--- /dev/null ++++ b/hw/vfio/ub.h +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (c) 2023-2023 HUAWEI TECHNOLOGIES CO.,LTD. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#ifndef HW_VFIO_VFIO_UB_H ++#define HW_VFIO_VFIO_UB_H ++ ++#include "exec/memory.h" ++#include "hw/ub/ub.h" ++#include "hw/vfio/vfio-common.h" ++#include "qemu/event_notifier.h" ++#include "qemu/queue.h" ++#include "qemu/timer.h" ++#include "qom/object.h" ++#include "sysemu/kvm.h" ++ ++#define UB_ANY_ID (~0) ++ ++#define TYPE_VFIO_UB "vfio-ub" ++OBJECT_DECLARE_SIMPLE_TYPE(VFIOUBDevice, VFIO_UB) ++#define VFIO_UB_SAFE(UBDevice) \ ++ ((UBDevice)->host_dev ? VFIO_UB(UBDevice) : NULL) ++ ++typedef struct VFIOERS { ++ VFIORegion region; ++ MemoryRegion *mr; ++ size_t size; ++ QLIST_HEAD(, VFIOQuirk) quirks; ++} VFIOERS; ++ ++typedef struct VFIOUSIVector { ++ EventNotifier interrupt; ++ EventNotifier kvm_interrupt; ++ struct VFIOUBDevice *vdev; ++ int virq; ++ bool use; ++} VFIOUSIVector; ++ ++typedef struct VFIOUSIInfo { ++ uint16_t vec_table_num; ++ uint16_t addr_table_num; ++ uint64_t vec_table_start_addr; ++ uint64_t addr_table_start_addr; ++ uint64_t pend_table_start_addr; ++} VFIOUSIInfo; ++ ++struct VFIOUBDevice { ++ UBDevice udev; ++ VFIODevice vbasedev; ++ unsigned int config_size; ++ uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */ ++ off_t config_offset; /* Offset of config space region within device fd */ ++ VFIOERS ers[UB_NUM_REGIONS]; ++ VFIOUSIInfo *usi; ++ VFIOUSIVector *usi_vectors; ++ int nr_vectors; /* Number of usi vectors currently in use */ ++ ++ UBHostDeviceAddress host; ++}; ++#endif /* HW_VFIO_VFIO_UB_H */ +\ No newline at end of file +diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h +index abae8655c4..a1bf609071 100644 +--- a/include/hw/vfio/vfio-common.h ++++ b/include/hw/vfio/vfio-common.h +@@ -41,6 +41,7 @@ enum { + VFIO_DEVICE_TYPE_PLATFORM = 1, + VFIO_DEVICE_TYPE_CCW = 2, + VFIO_DEVICE_TYPE_AP = 3, ++ VFIO_DEVICE_TYPE_UB = 4, + }; + + typedef struct VFIOMmap { +-- +2.33.0 + diff --git a/ub-add-bitmap_scnprintf-help-function-for-later-hmp-.patch b/ub-add-bitmap_scnprintf-help-function-for-later-hmp-.patch new file mode 100644 index 0000000000000000000000000000000000000000..12d42082ea4f9b34c389544f3468cdaeba8ca36d --- /dev/null +++ b/ub-add-bitmap_scnprintf-help-function-for-later-hmp-.patch @@ -0,0 +1,80 @@ +From 65d7045b6a8475e0f48f4f855129cfa913ad3181 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 15:54:16 +0800 +Subject: [PATCH 5/7] ub: add bitmap_scnprintf help function for later hmp cmd + +add some help functions + +Signed-off-by: caojinhuahw +--- + include/qemu/bitmap.h | 2 ++ + util/bitmap.c | 44 +++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 46 insertions(+) + +diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h +index 97806811ee..dc5c683e6c 100644 +--- a/include/qemu/bitmap.h ++++ b/include/qemu/bitmap.h +@@ -280,5 +280,7 @@ void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src, + unsigned long offset, unsigned long nbits); + void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src, + unsigned long shift, unsigned long nbits); ++int bitmap_scnprintf(char *buf, unsigned int buflen, ++ const unsigned long *maskp, int nmaskbits); + + #endif /* BITMAP_H */ +diff --git a/util/bitmap.c b/util/bitmap.c +index 8d12e90a5a..2f9292462e 100644 +--- a/util/bitmap.c ++++ b/util/bitmap.c +@@ -532,3 +532,47 @@ void bitmap_copy_with_dst_offset(unsigned long 
*dst, const unsigned long *src, + *dst |= (*src & last_mask) << shift; + } + } ++ ++#define CHUNKSZ 32 ++#define ALIGN(x, a) (((x) + (a) - 1UL) & ~((a) - 1UL)) ++ ++int bitmap_scnprintf(char *buf, unsigned int buflen, ++ const unsigned long *maskp, int nmaskbits) ++{ ++ int i, word, bit; ++ int len = 0; ++ unsigned long val; ++ const char *sep = ""; ++ int chunksz; ++ uint32_t chunkmask; ++ int first = 1; ++ int ret; ++ ++ chunksz = nmaskbits & (CHUNKSZ - 1); ++ if (chunksz == 0) { ++ chunksz = CHUNKSZ; ++ } ++ ++ i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; ++ for (; i >= 0; i -= CHUNKSZ) { ++ chunkmask = ((1ULL << chunksz) - 1); ++ word = i / BITS_PER_LONG; ++ bit = i % BITS_PER_LONG; ++ val = (maskp[word] >> bit) & chunkmask; ++ if (val != 0 || !first || i == 0) { ++ /* (chunksz + 3) / 4 in order to align */ ++ ret = snprintf(buf + len, buflen - len, "%s%0*lx", sep, ++ (chunksz + 3) / 4, val); ++ if (ret < 0) { ++ (void)fprintf(stderr, "Executing snprintf failed: %d\n", ret); ++ return -1; ++ } else { ++ len += ret; ++ } ++ chunksz = CHUNKSZ; ++ sep = ","; ++ first = 0; ++ } ++ } ++ return len; ++} +-- +2.33.0 + diff --git a/ub-add-bus-controller-state-for-ub-init.patch b/ub-add-bus-controller-state-for-ub-init.patch new file mode 100644 index 0000000000000000000000000000000000000000..92cffed9e93833ad692848d5b740b4a151e5315c --- /dev/null +++ b/ub-add-bus-controller-state-for-ub-init.patch @@ -0,0 +1,503 @@ +From 05937620dc1268a0552622be48b5ec96eba50453 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Mon, 10 Nov 2025 19:16:05 +0800 +Subject: [PATCH] ub: add bus controller state for ub init +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1、add base bus controller state realize +2、support create bus controller in virt init + +Signed-off-by: caojinhuahw +--- + hw/arm/virt.c | 20 ++++ + hw/ub/meson.build | 1 + + hw/ub/ub_ubc.c | 198 +++++++++++++++++++++++++++++++++++++ + include/hw/ub/hisi/ub_fm.h | 25 +++++ + include/hw/ub/hisi/ubc.h | 48 +++++++++ + include/hw/ub/ub.h | 68 +++++++++++++ + include/hw/ub/ub_ubc.h | 47 +++++++++ + 7 files changed, 407 insertions(+) + create mode 100644 hw/ub/ub_ubc.c + create mode 100644 include/hw/ub/hisi/ub_fm.h + create mode 100644 include/hw/ub/hisi/ubc.h + create mode 100644 include/hw/ub/ub.h + create mode 100644 include/hw/ub/ub_ubc.h + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index e046fff5dd..b209140684 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -87,6 +87,13 @@ + #include "hw/char/pl011.h" + #include "qemu/guest-random.h" + #include "qapi/qmp/qdict.h" ++#include "qemu/log.h" ++#ifdef CONFIG_UB ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/hisi/ub_fm.h" ++#endif // CONFIG_UB + + #define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ + static void virt_##major##_##minor##_class_init(ObjectClass *oc, \ +@@ -1714,7 +1721,17 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) + 0x0, vms->iommu_phandle, 0x0, bdf, + bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf); + } ++#ifdef CONFIG_UB ++static void create_ub(VirtMachineState *vms) ++{ ++ DeviceState *ubc; + ++ ubc = qdev_new(TYPE_BUS_CONTROLLER); ++ qdev_prop_set_uint32(ubc, "ub-bus-controller-msgq-reg-size", UBC_MSGQ_REG_SIZE); ++ qdev_prop_set_uint32(ubc, "ub-bus-controller-fm-msgq-reg-size", FM_MSGQ_REG_SIZE); ++ sysbus_realize_and_unref(SYS_BUS_DEVICE(ubc), &error_fatal); ++} ++#endif // CONFIG_UB + static void create_pcie(VirtMachineState *vms) + { + hwaddr 
base_mmio = vms->memmap[VIRT_PCIE_MMIO].base; +@@ -2874,6 +2891,9 @@ static void machvirt_init(MachineState *machine) + create_rtc(vms); + + create_pcie(vms); ++#ifdef CONFIG_UB ++ create_ub(vms); ++#endif // CONFIG_UB + + if (!has_ged) { + create_gpio_devices(vms, VIRT_GPIO, sysmem); +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index 21c3f0ea6c..b6d5f4beff 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -1,5 +1,6 @@ + ub_ss = ss.source_set() + ub_ss.add(files( ++ 'ub_ubc.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) + subdir('hisi') +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +new file mode 100644 +index 0000000000..a0bc907ec8 +--- /dev/null ++++ b/hw/ub/ub_ubc.c +@@ -0,0 +1,198 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "qemu/log.h" ++#include "qemu/module.h" ++#include "qemu/units.h" ++#include "hw/arm/virt.h" ++#include "hw/qdev-properties.h" ++#include "hw/qdev-properties-system.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_ubc.h" ++#include "migration/vmstate.h" ++ ++static uint64_t ub_msgq_reg_read(void *opaque, hwaddr addr, unsigned len) ++{ ++ BusControllerState *s = opaque; ++ uint64_t val; ++ ++ switch (len) { ++ case BYTE_SIZE: ++ val = ub_get_byte(s->msgq_reg + addr); ++ break; ++ case WORD_SIZE: ++ val = ub_get_word(s->msgq_reg + addr); ++ break; ++ case DWORD_SIZE: ++ val = ub_get_long(s->msgq_reg + addr); ++ break; ++ default: ++ qemu_log("invalid argument len 0x%x\n", len); ++ val = ~0x0; ++ break; ++ } ++ ++ return val; ++} ++ ++static void ub_msgq_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned len) ++{ ++ BusControllerState *s = opaque; ++ ++ switch (len) { ++ case BYTE_SIZE: ++ ub_set_byte(s->msgq_reg + addr, val); ++ break; ++ case WORD_SIZE: ++ ub_set_word(s->msgq_reg + addr, val); ++ break; ++ case DWORD_SIZE: ++ ub_set_long(s->msgq_reg + addr, val); ++ break; ++ default: ++ /* As length is under guest control, handle illegal values. 
*/ ++ qemu_log("invalid argument len 0x%x val 0x%lx\n", len, val); ++ return; ++ } ++} ++ ++static const MemoryRegionOps ub_msgq_reg_ops = { ++ .read = ub_msgq_reg_read, ++ .write = ub_msgq_reg_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++}; ++ ++static const MemoryRegionOps ub_fm_msgq_reg_ops = { ++ ++}; ++ ++static void ub_reg_alloc(DeviceState *dev) ++{ ++ BusControllerState *s = BUS_CONTROLLER(dev); ++ ++ s->msgq_reg = g_malloc0(s->msgq_reg_size); ++ s->fm_msgq_reg = g_malloc0(s->fm_msgq_reg_size); ++ qemu_log("alloc ub reg mem size: msgq_reg %u, " ++ "fm_msgq_reg %u\n", ++ s->msgq_reg_size, s->fm_msgq_reg_size); ++} ++ ++static void ub_reg_free(DeviceState *dev) ++{ ++ BusControllerState *s = BUS_CONTROLLER(dev); ++ ++ g_free(s->msgq_reg); ++ g_free(s->fm_msgq_reg); ++ qemu_log("free ub reg mem\n"); ++} ++ ++static void ub_bus_controller_realize(DeviceState *dev, Error **errp) ++{ ++ BusControllerState *s = BUS_CONTROLLER(dev); ++ SysBusDevice *sysdev = SYS_BUS_DEVICE(dev); ++ static uint8_t NO = 0; ++ char *name = g_strdup_printf("ubus.%u", NO); ++ ++ sysdev->parent_obj.id = g_strdup_printf("ubc.%u", NO++); ++ /* for msgq reg */ ++ memory_region_init_io(&s->msgq_reg_mem, OBJECT(s), &ub_msgq_reg_ops, ++ s, TYPE_BUS_CONTROLLER, s->msgq_reg_size); ++ sysbus_init_mmio(sysdev, &s->msgq_reg_mem); ++ /* for fm msgq reg */ ++ memory_region_init_io(&s->fm_msgq_reg_mem, OBJECT(s), &ub_fm_msgq_reg_ops, ++ s, TYPE_BUS_CONTROLLER, s->fm_msgq_reg_size); ++ sysbus_init_mmio(sysdev, &s->fm_msgq_reg_mem); ++ ub_reg_alloc(dev); ++ /* for ub controller mmio */ ++ memory_region_init(&s->io_mmio, OBJECT(s), "UB_MMIO", UINT64_MAX); ++ sysbus_init_mmio(sysdev, &s->io_mmio); ++ ++ g_free(name); ++} ++ ++static void ub_bus_controller_unrealize(DeviceState *dev) ++{ ++ BusControllerState *s = BUS_CONTROLLER(dev); ++ SysBusDevice *sysdev = SYS_BUS_DEVICE(dev); ++ g_free(sysdev->parent_obj.id); ++ QLIST_REMOVE(s, node); ++ ub_reg_free(dev); ++} ++ ++static bool ub_bus_controller_needed(void *opaque) ++{ ++ BusControllerState *s = opaque; ++ return s->mig_enabled; ++} ++ ++static Property ub_bus_controller_properties[] = { ++ DEFINE_PROP_UINT32("ub-bus-controller-msgq-reg-size", BusControllerState, ++ msgq_reg_size, 0), ++ DEFINE_PROP_UINT32("ub-bus-controller-fm-msgq-reg-size", BusControllerState, ++ fm_msgq_reg_size, 0), ++ DEFINE_PROP_BOOL("ub-bus-controller-migration-enabled", BusControllerState, ++ mig_enabled, true), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++const VMStateDescription vmstate_ub_bus_controller = { ++ .name = TYPE_BUS_CONTROLLER, ++ .needed = ub_bus_controller_needed, ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .fields = (VMStateField[]) { ++ /* support migration later */ ++ VMSTATE_END_OF_LIST() ++ } ++}; ++ ++static void ub_bus_controller_class_init(ObjectClass *class, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(class); ++ ++ device_class_set_props(dc, ub_bus_controller_properties); ++ dc->realize = ub_bus_controller_realize; ++ dc->unrealize = ub_bus_controller_unrealize; ++ dc->vmsd = &vmstate_ub_bus_controller; ++} ++ ++static void ub_bus_controller_instance_init(Object *obj) ++{ ++ /* do nothing now */ ++} ++ ++static void ub_bus_controller_instance_finalize(Object *obj) ++{ ++ /* do nothing now */ ++} ++static const TypeInfo ub_bus_controller_type_info = { ++ .name = TYPE_BUS_CONTROLLER, ++ .parent = TYPE_SYS_BUS_DEVICE, ++ .instance_size = sizeof(BusControllerState), ++ .instance_init = ub_bus_controller_instance_init, ++ .instance_finalize = 
ub_bus_controller_instance_finalize, ++ .class_size = sizeof(BusControllerClass), ++ .class_init = ub_bus_controller_class_init, ++}; ++ ++static void ub_bus_controller_register_types(void) ++{ ++ type_register_static(&ub_bus_controller_type_info); ++} ++type_init(ub_bus_controller_register_types) +diff --git a/include/hw/ub/hisi/ub_fm.h b/include/hw/ub/hisi/ub_fm.h +new file mode 100644 +index 0000000000..bd606227a6 +--- /dev/null ++++ b/include/hw/ub/hisi/ub_fm.h +@@ -0,0 +1,25 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_HISI_FM_H ++#define UB_HISI_FM_H ++#include "hw/ub/hisi/ubc.h" ++ ++#define FM_MSGQ_REG_OFFSET (UBC_MSGQ_REG_OFFSET + UBC_MSGQ_REG_SIZE) ++#define FM_MSGQ_REG_SIZE 0x100000 /* 1MiB */ ++ ++#endif +diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h +new file mode 100644 +index 0000000000..fdaeae7b3e +--- /dev/null ++++ b/include/hw/ub/hisi/ubc.h +@@ -0,0 +1,48 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef HISI_UBC_H ++#define HISI_UBC_H ++ ++/* ++ * Address space layout of the UB controller ++ * References: LinQuickCV100 Programming User Guide ++ * ++ * +----------------------------+ BA_ADDR+0xFFFF_FFFF ++ * | UMMU REG (3G) | ++ * +----------------------------+ BA_ADDR+0x4000_0000 ++ * | ... 
| ++ * +----------------------------+ ++ * | UB MSGQ(32M) only 2MB used | ++ * +----------------------------+ BA_ADDR+0x1000_0000 ++ * | Local Register (256M) | ++ * +----------------------------+ BA_ADDR+0x0000_0000 ++*/ ++#define BASE_REG_SIZE 0x100000000 /* 4GiB */ ++#define LOCAL_REG_SIZE 0x10000000 /* 256MiB */ ++#define UBC_MSGQ_REG_SIZE 0x100000 /* 1MiB */ ++#define UMMU_REG_SIZE 0xC0000000 /* 3GiB */ ++#define UMMU_REG_OFFSET 0x40000000 ++#define UBC_MSGQ_REG_OFFSET LOCAL_REG_SIZE ++#define LOCAL_REG_OFFSET 0 ++#define SINGLE_UMMU_REG_SIZE 0x5000 /* 20KiB */ ++#define SINGLE_UMMU_PMU_REG_SIZE 0x1000 /* 4KiB */ ++#define UBC_INTERRUPT_ID_START 0x1FFF ++#define UBC_INTERRUPT_ID_CNT 0x1000 ++#define VENDER_ID_HUAWEI 0xCC08 ++ ++#endif +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +new file mode 100644 +index 0000000000..4e3ed8a919 +--- /dev/null ++++ b/include/hw/ub/ub.h +@@ -0,0 +1,68 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_H ++#define UB_H ++#include ++#include "qemu/typedefs.h" ++#include "exec/memory.h" ++ ++#define BYTE_SIZE 1 ++#define WORD_SIZE 2 ++#define DWORD_SIZE 4 ++ ++static inline void ub_set_byte(uint8_t *config, uint8_t val) ++{ ++ *config = val; ++} ++ ++static inline uint8_t ub_get_byte(const uint8_t *config) ++{ ++ return *config; ++} ++ ++static inline void ub_set_word(uint8_t *config, uint16_t val) ++{ ++ stw_le_p(config, val); ++} ++ ++static inline uint16_t ub_get_word(const uint8_t *config) ++{ ++ return lduw_le_p(config); ++} ++ ++static inline void ub_set_long(uint8_t *config, uint32_t val) ++{ ++ stl_le_p(config, val); ++} ++ ++static inline uint32_t ub_get_long(const uint8_t *config) ++{ ++ return ldl_le_p(config); ++} ++ ++static inline void ub_set_quad(uint8_t *config, uint64_t val) ++{ ++ stq_le_p(config, val); ++} ++ ++static inline uint64_t ub_get_quad(const uint8_t *config) ++{ ++ return ldq_le_p(config); ++} ++ ++#endif +diff --git a/include/hw/ub/ub_ubc.h b/include/hw/ub/ub_ubc.h +new file mode 100644 +index 0000000000..5e791fbcf3 +--- /dev/null ++++ b/include/hw/ub/ub_ubc.h +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef UB_UBC_H ++#define UB_UBC_H ++ ++#include "hw/sysbus.h" ++#include "qom/object.h" ++ ++#define TYPE_BUS_CONTROLLER "ub-bus-controller" ++OBJECT_DECLARE_TYPE(BusControllerState, BusControllerClass, BUS_CONTROLLER) ++ ++typedef struct BusControllerState BusControllerState; ++struct BusControllerState { ++ SysBusDevice busdev; ++ ++ MemoryRegion msgq_reg_mem; /* ubc msgq */ ++ uint32_t msgq_reg_size; ++ uint8_t *msgq_reg; ++ MemoryRegion fm_msgq_reg_mem; /* fm msgq */ ++ uint32_t fm_msgq_reg_size; ++ uint8_t *fm_msgq_reg; ++ MemoryRegion io_mmio; /* ub mmio hpa memory region */ ++ uint32_t mmio_size; ++ bool mig_enabled; ++ QLIST_ENTRY(BusControllerState) node; ++}; ++ ++struct BusControllerClass { ++ SysBusDeviceClass parent_class; ++}; ++ ++#endif +-- +2.33.0 + diff --git a/ub-add-bus-instance-verify-for-vfio-ub-prepare.patch b/ub-add-bus-instance-verify-for-vfio-ub-prepare.patch new file mode 100644 index 0000000000000000000000000000000000000000..d54e51a16da172ca82f8274ee306f92fbd46ffa4 --- /dev/null +++ b/ub-add-bus-instance-verify-for-vfio-ub-prepare.patch @@ -0,0 +1,143 @@ +From 9d95b63b82a727fb5feeb2e8132ff9e829976c24 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 09:36:20 +0800 +Subject: [PATCH 3/5] ub: add bus instance verify for vfio-ub prepare + +ub device used by guest through vfio should bind to bus instance, +this prepare for bus instance check when use vfio ub device + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 88 insertions(+) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 6d42abfe27..fbfedb6368 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -34,6 +34,7 @@ + #include "hw/ub/ub_ubc.h" + #include "migration/vmstate.h" + #include "exec/address-spaces.h" ++#include "hw/ub/ubus_instance.h" + #include "monitor/monitor.h" + #include "trace.h" + +@@ -474,6 +475,70 @@ static void do_ub_unregister_device(UBDevice *ub_dev) + ub_port_info_free(ub_dev); + } + ++static uint32_t ub_get_host_bus_instance_eid(UbGuid *guid) ++{ ++ uint32_t bus_instance_eid; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ int bus_instance_type; ++ ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ bus_instance_eid = sysfs_get_bus_instance_eid_by_guid(guid); ++ if (bus_instance_eid == UINT32_MAX) { ++ qemu_log("sysfs failed to get bus instance eid by guid %s\n", guid_str); ++ return UINT32_MAX; ++ } ++ ++ bus_instance_type = sysfs_get_bus_instance_type_by_eid(bus_instance_eid); ++ if (!UBUS_INSTANCE_IS_DYNAMIC(bus_instance_type)) { ++ qemu_log("bus instance(guid: %s) not dynamic bus instance.\n", guid_str); ++ return UINT32_MAX; ++ } ++ ++ return bus_instance_eid; ++} ++ ++/* current this just for vfio ub dev host bus instance verify */ ++static int ub_dev_bus_instance_verify(UBDevice *dev, Error **errp) ++{ ++ BusControllerState *ubc = QLIST_FIRST(&ub_bus_controllers); ++ BusControllerDev *ub_bus_controller_dev = NULL; ++ UBDevice *ubc_dev = NULL; ++ uint32_t bus_instance_eid; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ ++ if (!ubc) { ++ qemu_log("failed to get ub bus controller, bus instance verify later.\n"); ++ return 0; ++ } ++ ++ ub_bus_controller_dev = ubc->ubc_dev; ++ ++ if (!ub_bus_controller_dev) { ++ qemu_log("ub controller dev not realized, bus instance verify later.\n"); ++ return 0; ++ } ++ ++ ubc_dev = &ub_bus_controller_dev->parent; ++ ++ if (ubc_dev->bus_instance_eid == UINT32_MAX) { ++ bus_instance_eid = 
ub_get_host_bus_instance_eid(&ub_bus_controller_dev->bus_instance_guid); ++ if (bus_instance_eid == UINT32_MAX) { ++ error_setg(errp, "failed to get bus instance eid.\n"); ++ return -1; ++ } ++ ubc_dev->bus_instance_eid = bus_instance_eid; ++ } ++ ++ if (ubc_dev->bus_instance_eid != dev->bus_instance_eid) { ++ ub_device_get_str_from_guid(&dev->guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ error_setg(errp, "ub dev(guid: %s) bus instance eid verify failed: expect 0x%x, actual 0x%x\n", ++ guid_str, ubc_dev->bus_instance_eid, dev->bus_instance_eid); ++ return -1; ++ } ++ ++ return 0; ++} ++ + static void ub_qdev_realize(DeviceState *qdev, Error **errp) + { + UBDevice *ub_dev = (UBDevice *)qdev; +@@ -490,6 +555,7 @@ static void ub_qdev_realize(DeviceState *qdev, Error **errp) + return; + } + ++ ub_dev->bus_instance_verify = ub_dev_bus_instance_verify; + if (uc->realize) { + uc->realize(ub_dev, &local_err); + if (local_err) { +@@ -916,6 +982,24 @@ uint32_t ub_interrupt_id(UBDevice *udev) + return cfg1_int_cap->interrupt_id; + } + ++static int ub_bus_instance_verify(Error **errp) ++{ ++ BusControllerState *ubc = QLIST_FIRST(&ub_bus_controllers); ++ UBDevice *dev = NULL; ++ ++ QLIST_FOREACH(dev, &ubc->bus->devices, node) { ++ if (dev->dev_type == UB_TYPE_IBUS_CONTROLLER || ++ dev->bus_instance_eid == UINT32_MAX) { ++ continue; ++ } ++ ++ if (ub_dev_bus_instance_verify(dev, errp)) { ++ return -1; ++ } ++ } ++ return 0; ++} ++ + /* + * now all ub device add, finally setup for all ub device. + * 1. check ub device bus instance type +@@ -923,6 +1007,10 @@ uint32_t ub_interrupt_id(UBDevice *udev) + * */ + int ub_dev_finally_setup(VirtMachineState *vms, Error **errp) + { ++ if (ub_bus_instance_verify(errp)) { ++ return -1; ++ } ++ + /* + * Initialize the port information of all UB devices according + * to the input information after all UB devices are constructed. 
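/*
 * Illustrative sketch, not taken from this series: how board code is
 * presumably expected to drive ub_dev_finally_setup() once every
 * -device option has been processed (for instance from a
 * machine-init-done notifier).  The call site name and the error
 * policy are assumptions; only the ub_dev_finally_setup() signature
 * comes from the hunk above.
 */
static void example_ub_machine_done(VirtMachineState *vms)
{
    Error *err = NULL;

    if (ub_dev_finally_setup(vms, &err)) {
        /* bus instance verification failures are fatal before the guest runs */
        error_report_err(err);
        exit(1);
    }
}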
+-- +2.33.0 + diff --git a/ub-add-function-for-later-alloc-idev-ers-addr.patch b/ub-add-function-for-later-alloc-idev-ers-addr.patch new file mode 100644 index 0000000000000000000000000000000000000000..2c8f30333f28bd7c1909e494ff90d8e0c0c3befb --- /dev/null +++ b/ub-add-function-for-later-alloc-idev-ers-addr.patch @@ -0,0 +1,247 @@ +From f953a105abfd3b7e9e1a695d9e51333c0cca68a3 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 22:19:12 +0800 +Subject: [PATCH 2/5] ub: add function for later alloc idev ers addr + +ers addr is allocate by qemu when idev used by guest through +vfio, this prepre for later idev ers addr allocate + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_acpi.c | 192 ++++++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_acpi.h | 2 + + 2 files changed, 194 insertions(+) + +diff --git a/hw/ub/ub_acpi.c b/hw/ub/ub_acpi.c +index f9e38a2da3..4f4c77b869 100644 +--- a/hw/ub/ub_acpi.c ++++ b/hw/ub/ub_acpi.c +@@ -23,6 +23,7 @@ + #include "hw/qdev-properties.h" + #include "hw/qdev-properties-system.h" + #include "hw/ub/ub.h" ++#include "hw/ub/ub_config.h" + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" + #include "hw/ub/ub_acpi.h" +@@ -42,6 +43,26 @@ + #define DTS_SIG_UMMU "ummu" + #define DTS_SIG_RSV_MEM "rsv_mem" + ++typedef struct UBIdevErsAddrSpaceNode { ++ uint64_t offset; ++ uint64_t allocated_offset; ++ uint64_t size; ++ ++ QTAILQ_ENTRY(UBIdevErsAddrSpaceNode) stailq_free; ++ QTAILQ_ENTRY(UBIdevErsAddrSpaceNode) stailq_used; ++} UBIdevErsAddrSpaceNode; ++ ++typedef struct UBIdevErsAddrSpaceManage { ++ bool init; ++ uint64_t size; ++ hwaddr base_addr; ++ ++ QTAILQ_HEAD(, UBIdevErsAddrSpaceNode) as_free_list; ++ QTAILQ_HEAD(, UBIdevErsAddrSpaceNode) as_used_list; ++} UBIdevErsAddrSpaceManage; ++ ++UBIdevErsAddrSpaceManage g_idevErsAddrSpaceManage; ++ + static uint8_t gpa_bits; + void ub_set_gpa_bits(uint8_t bits) + { +@@ -319,6 +340,177 @@ void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms) + cpu_physical_memory_unmap(ubios, size, true, size); + } + ++static void ub_idev_ers_address_space_manage_init(void) ++{ ++ VirtMachineState *vms = (VirtMachineState *)current_machine; ++ UBIdevErsAddrSpaceNode *free_node = NULL; ++ ++ g_idevErsAddrSpaceManage.base_addr = vms->memmap[VIRT_UB_IDEV_ERS].base; ++ g_idevErsAddrSpaceManage.size = vms->memmap[VIRT_UB_IDEV_ERS].size; ++ ++ QTAILQ_INIT(&g_idevErsAddrSpaceManage.as_free_list); ++ QTAILQ_INIT(&g_idevErsAddrSpaceManage.as_used_list); ++ ++ free_node = g_new0(UBIdevErsAddrSpaceNode, 1); ++ free_node->size = g_idevErsAddrSpaceManage.size; ++ free_node->offset = 0; ++ QTAILQ_INSERT_TAIL(&g_idevErsAddrSpaceManage.as_free_list, free_node, stailq_free); ++ qemu_log("ub idev ers address space manage init success, base_addr: 0x%lx size: 0x%lx\n", ++ g_idevErsAddrSpaceManage.base_addr, g_idevErsAddrSpaceManage.size); ++} ++ ++hwaddr ub_idev_ers_alloc_address_space(uint64_t size, uint32_t sys_pgs) ++{ ++ UBIdevErsAddrSpaceNode *free_node = NULL; ++ UBIdevErsAddrSpaceNode *selected_free_node = NULL; ++ UBIdevErsAddrSpaceNode *used_node = NULL; ++ uint64_t need_node_size; ++ uint64_t free_node_base_addr; ++ uint64_t allocated_base_addr; ++ uint64_t allocated_diff; ++ ++ if (!g_idevErsAddrSpaceManage.init) { ++ g_idevErsAddrSpaceManage.init = true; ++ ub_idev_ers_address_space_manage_init(); ++ } ++ ++ /* according UB Spec, if sys_pgs 0, unit is 4Kbytes, then unit is 64Kbytes */ ++ if (!sys_pgs) { ++ size *= UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_4K; ++ } else { ++ size *= UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_64K; ++ } ++ ++ 
QTAILQ_FOREACH(free_node, &g_idevErsAddrSpaceManage.as_free_list, stailq_free) { ++ if (free_node->size < size) { ++ continue; ++ } ++ ++ free_node_base_addr = g_idevErsAddrSpaceManage.base_addr + free_node->offset; ++ /* allocated base addr need align to allocated size */ ++ allocated_base_addr = ALIGN_UP(free_node_base_addr, size); ++ allocated_diff = allocated_base_addr - free_node_base_addr; ++ need_node_size = allocated_diff + size; ++ if (free_node->size < need_node_size) { ++ continue; ++ } ++ ++ if (selected_free_node && selected_free_node->size < free_node->size) { ++ continue; ++ } ++ ++ selected_free_node = free_node; ++ if (!used_node) { ++ /* create used node */ ++ used_node = g_new0(UBIdevErsAddrSpaceNode, 1); ++ } ++ used_node->offset = selected_free_node->offset; ++ used_node->allocated_offset = used_node->offset + allocated_diff; ++ used_node->size = size + allocated_diff; ++ } ++ ++ if (!selected_free_node) { ++ g_free(used_node); ++ return UINT64_MAX; ++ } ++ ++ /* adjust free node */ ++ if (selected_free_node->size - size < UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_4K) { ++ used_node->size = selected_free_node->size; ++ QTAILQ_REMOVE(&g_idevErsAddrSpaceManage.as_free_list, selected_free_node, stailq_free); ++ g_free(selected_free_node); ++ } else { ++ selected_free_node->size -= used_node->size; ++ selected_free_node->offset += used_node->size; ++ } ++ ++ QTAILQ_INSERT_TAIL(&g_idevErsAddrSpaceManage.as_used_list, used_node, stailq_used); ++ ++ return allocated_base_addr; ++} ++ ++void ub_idev_ers_free_address_space(hwaddr offset) ++{ ++ UBIdevErsAddrSpaceNode *used_node = NULL; ++ UBIdevErsAddrSpaceNode *free_node = NULL; ++ UBIdevErsAddrSpaceNode *next_free_node = NULL; ++ uint64_t as_offset = offset - g_idevErsAddrSpaceManage.base_addr; ++ ++ QTAILQ_FOREACH(used_node, &g_idevErsAddrSpaceManage.as_used_list, stailq_used) { ++ if (used_node->allocated_offset == as_offset) { ++ QTAILQ_REMOVE(&g_idevErsAddrSpaceManage.as_used_list, used_node, stailq_used); ++ break; ++ } ++ } ++ ++ if (!used_node) { ++ qemu_log("idev ers address space free failed, unable to find offset 0x%lx.\n", offset); ++ return; ++ } ++ ++ /* adjust free node list */ ++ /* case 1: as free list is empty */ ++ if (QTAILQ_EMPTY(&g_idevErsAddrSpaceManage.as_free_list)) { ++ QTAILQ_INSERT_HEAD(&g_idevErsAddrSpaceManage.as_free_list, used_node, stailq_free); ++ return; ++ } ++ ++ /* case 2: freed used_node->offset is minial */ ++ free_node = QTAILQ_FIRST(&g_idevErsAddrSpaceManage.as_free_list); ++ if (used_node->offset + used_node->size < free_node->offset) { ++ QTAILQ_INSERT_HEAD(&g_idevErsAddrSpaceManage.as_free_list, used_node, stailq_free); ++ return; ++ } else if (used_node->offset + used_node->size == free_node->offset) { /* merge to first free node */ ++ free_node->offset = used_node->offset; ++ free_node->size += used_node->size; ++ g_free(used_node); ++ return; ++ } ++ ++ /* case 3: foreach all free node, insert freed address space to free node in order */ ++ QTAILQ_FOREACH(free_node, &g_idevErsAddrSpaceManage.as_free_list, stailq_free) { ++ next_free_node = QTAILQ_NEXT(free_node, stailq_free); ++ if (!next_free_node) { ++ if (free_node->offset + free_node->size < used_node->offset) { ++ QTAILQ_INSERT_TAIL(&g_idevErsAddrSpaceManage.as_free_list, used_node, stailq_free); ++ } else if (free_node->offset + free_node->size == used_node->offset) { ++ free_node->size += used_node->size; ++ g_free(used_node); ++ } ++ return; ++ } ++ ++ if (used_node->offset >= next_free_node->offset + next_free_node->size) { ++ 
continue; ++ } ++ ++ if (free_node->offset + free_node->size == used_node->offset && ++ used_node->offset + used_node->size < next_free_node->offset) { ++ free_node->size += used_node->size; ++ g_free(used_node); ++ return; ++ } else if (free_node->offset + free_node->size < used_node->offset && ++ used_node->offset + used_node->size == next_free_node->offset) { ++ next_free_node->offset = used_node->offset; ++ next_free_node->size += used_node->size; ++ g_free(used_node); ++ return; ++ } else if (free_node->offset + free_node->size < used_node->offset && ++ used_node->offset + used_node->size < next_free_node->offset) { ++ QTAILQ_INSERT_AFTER(&g_idevErsAddrSpaceManage.as_free_list, free_node, used_node, stailq_free); ++ return; ++ } else { ++ next_free_node->offset = free_node->offset; ++ next_free_node->size += free_node->size; ++ next_free_node->size += used_node->size; ++ QTAILQ_REMOVE(&g_idevErsAddrSpaceManage.as_free_list, free_node, stailq_free); ++ g_free(used_node); ++ g_free(free_node); ++ return; ++ } ++ } ++} ++ + void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) + { + /* 3 subtables: ubc, ummu, UB Reserved Memory */ +diff --git a/include/hw/ub/ub_acpi.h b/include/hw/ub/ub_acpi.h +index 3579256444..47f3950dfe 100644 +--- a/include/hw/ub/ub_acpi.h ++++ b/include/hw/ub/ub_acpi.h +@@ -175,6 +175,8 @@ typedef struct AcpiUbrtTable { + UBIOS_RSV_MEM_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT)) + + void ub_init_ubios_info_table(VirtMachineState *vms, uint64_t total_size); ++hwaddr ub_idev_ers_alloc_address_space(uint64_t size, uint32_t sys_pgs); ++void ub_idev_ers_free_address_space(hwaddr offset); + void ub_set_gpa_bits(uint8_t bits); + void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms); + void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms); +-- +2.33.0 + diff --git a/ub-add-kvm-irqchip-usi-route-help-function.patch b/ub-add-kvm-irqchip-usi-route-help-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..c41f23b2a211c422619a7b1d35b8a3b4a53b0b77 --- /dev/null +++ b/ub-add-kvm-irqchip-usi-route-help-function.patch @@ -0,0 +1,291 @@ +From e9ef936d487ec7d235e27a6e06cc7330fc4cc3ec Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 17:03:21 +0800 +Subject: [PATCH 1/2] ub: add kvm irqchip usi route help function + +add kvm_irqchip_add_usi_route and kvm_irqchip_update_usi_route help +function + +Signed-off-by: caojinhuahw +--- + accel/kvm/kvm-all.c | 64 ++++++++++++++++++++++++++++++++++++++++++ + hw/ub/ub.c | 18 ++++++++++++ + include/hw/ub/ub.h | 22 +++++++++++++++ + include/hw/ub/ub_bus.h | 2 ++ + include/sysemu/kvm.h | 9 +++++- + target/arm/kvm.c | 41 +++++++++++++++++++++++++++ + 6 files changed, 155 insertions(+), 1 deletion(-) + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index 94634a1804..92ecbc78ff 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -26,6 +26,10 @@ + #include "qapi/error.h" + #include "hw/pci/msi.h" + #include "hw/pci/msix.h" ++#ifdef CONFIG_UB ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_usi.h" ++#endif // CONFIG_UB + #include "hw/s390x/adapter.h" + #include "exec/gdbstub.h" + #include "sysemu/kvm_int.h" +@@ -56,6 +60,8 @@ + #ifdef CONFIG_HAM_MIGRATION + #include "migration/ham.h" + #endif ++#include "qemu/log.h" ++ + /* This check must be after config-host.h is included */ + #ifdef CONFIG_EVENTFD + #include +@@ -2045,6 +2051,64 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) + return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); + } + 
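/*
 * Illustrative caller sketch, not part of the patch: the USI route
 * helpers added just below appear intended to mirror the MSI ones,
 * i.e. open a route-change transaction, add one route per vector,
 * then commit.  The wrapper name and the vector/message plumbing are
 * assumptions; kvm_state, KVMRouteChange,
 * kvm_irqchip_begin_route_changes() and
 * kvm_irqchip_commit_route_changes() are existing QEMU interfaces,
 * while USIMessage and ub_interrupt_id() come from this series.
 */
static int example_add_usi_vector_route(UBDevice *udev, USIMessage msg)
{
    KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
    int virq;

    /* route the doorbell write to a new virq; devid is the UB interrupt id */
    virq = kvm_irqchip_add_usi_route(&c, msg, ub_interrupt_id(udev), udev);
    if (virq < 0) {
        return virq;
    }
    kvm_irqchip_commit_route_changes(&c);

    return virq;
}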
++#ifdef CONFIG_UB ++int kvm_irqchip_add_usi_route(KVMRouteChange *c, USIMessage msg, ++ uint32_t devid, UBDevice *udev) ++{ ++ struct kvm_irq_routing_entry kroute = {}; ++ int virq; ++ KVMState *s = c->s; ++ ++ virq = kvm_irqchip_get_virq(s); ++ if (virq < 0) { ++ qemu_log("kvm irqchip get virq failed\n"); ++ return virq; ++ } ++ ++ kroute.gsi = virq; ++ kroute.type = KVM_IRQ_ROUTING_MSI; ++ kroute.flags = 0; ++ kroute.u.msi.address_lo = (uint32_t)msg.address; ++ kroute.u.msi.address_hi = msg.address >> 32; ++ kroute.u.msi.data = le32_to_cpu(msg.data); ++ kroute.flags = KVM_MSI_VALID_DEVID; ++ kroute.u.msi.devid = devid; ++ ++ if (udev && kvm_arch_fixup_usi_route(&kroute, msg.address, msg.data, udev)) { ++ kvm_irqchip_release_virq(s, virq); ++ return -EINVAL; ++ } ++ ++ kvm_add_routing_entry(s, &kroute); ++ c->changes++; ++ ++ return virq; ++} ++ ++int kvm_irqchip_update_usi_route(KVMRouteChange *c, int virq, USIMessage msg, UBDevice *udev) ++{ ++ struct kvm_irq_routing_entry kroute = {}; ++ ++ qemu_log("ub device(%s %s) virq(%d) start update usi route.\n", ++ udev->name, udev->qdev.id, virq); ++ kroute.gsi = virq; ++ kroute.type = KVM_IRQ_ROUTING_MSI; ++ kroute.flags = 0; ++ kroute.u.msi.address_lo = (uint32_t)msg.address; ++ kroute.u.msi.address_hi = msg.address >> 32; ++ kroute.u.msi.data = le32_to_cpu(msg.data); ++ kroute.flags = KVM_MSI_VALID_DEVID; ++ kroute.u.msi.devid = ub_interrupt_id(udev); ++ ++ if (udev && kvm_arch_fixup_usi_route(&kroute, msg.address, msg.data, udev)) { ++ qemu_log("failed to fixup usi route: addr(0x%lx) data(%u).\n", msg.address, msg.data); ++ return -EINVAL; ++ } ++ ++ return kvm_update_routing_entry(c, &kroute); ++} ++#endif // CONFIG_UB ++ + int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) + { + struct kvm_irq_routing_entry kroute = {}; +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 21481b950c..6a2c3cc493 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -625,6 +625,16 @@ BusControllerState *container_of_ubbus(UBBus *bus) + return NULL; + } + ++AddressSpace *ub_device_iommu_address_space(UBDevice *dev) ++{ ++ UBBus *bus = ub_get_bus(dev); ++ ++ if (bus->iommu_ops && bus->iommu_ops->get_address_space) { ++ return bus->iommu_ops->get_address_space(bus, bus->iommu_opaque, dev->eid); ++ } ++ return &address_space_memory; ++} ++ + UBDevice *ub_find_device_by_id(const char *id) + { + BusControllerState *ubc = NULL; +@@ -898,6 +908,14 @@ static int ub_dev_init_port_info_by_cmd(Error **errp) + return 0; + } + ++uint32_t ub_interrupt_id(UBDevice *udev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true); ++ UbCfg1IntType2Cap *cfg1_int_cap = (UbCfg1IntType2Cap *)(udev->config + offset); ++ ++ return cfg1_int_cap->interrupt_id; ++} ++ + /* + * now all ub device add, finally setup for all ub device. + * 1. check ub device bus instance type +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index b07cc36efd..db0a19a8bb 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -20,6 +20,7 @@ + #include + #include "qemu/typedefs.h" + #include "exec/memory.h" ++#include "sysemu/host_iommu_device.h" + #include "hw/arm/virt.h" + + #define BYTE_SIZE 1 +@@ -181,6 +182,25 @@ typedef struct UBDeviceClass { + DECLARE_OBJ_CHECKERS(UBDevice, UBDeviceClass, + UB_DEVICE, TYPE_UB_DEVICE) + ++typedef struct UBIOMMUOps { ++ /** ++ * @get_address_space: get the address space for a set of devices ++ * on a UB bus. 
++ * ++ * Mandatory callback which returns a pointer to an #AddressSpace ++ * ++ * @bus: the #UBBus being accessed. ++ * ++ * @opaque: the data passed to ub_setup_iommu(). ++ * ++ * @eid: ub device eid ++ */ ++ AddressSpace * (*get_address_space)(UBBus *bus, void *opaque, uint32_t eid); ++ bool (*set_iommu_device)(UBBus *bus, void *opaque, uint32_t eid, ++ HostIOMMUDevice *dev, Error **errp); ++ void (*unset_iommu_device)(UBBus *bus, void *opaque, uint32_t eid); ++ bool (*ummu_is_nested)(void *opaque); ++} UBIOMMUOps; + + static inline void ub_set_byte(uint8_t *config, uint8_t val) + { +@@ -232,5 +252,7 @@ static inline uint64_t ub_config_size(void) + { + return UB_DEV_CONFIG_SPACE_TOTAL_SIZE; + } ++AddressSpace *ub_device_iommu_address_space(UBDevice *dev); + UBDevice *ub_find_device_by_id(const char *id); ++uint32_t ub_interrupt_id(UBDevice *udev); + #endif +diff --git a/include/hw/ub/ub_bus.h b/include/hw/ub/ub_bus.h +index 58baea4efb..189dbf8785 100644 +--- a/include/hw/ub/ub_bus.h ++++ b/include/hw/ub/ub_bus.h +@@ -41,6 +41,8 @@ struct UBBus { + BusState qbus; + UBDeviceList devices; + MemoryRegion *address_space_mem; ++ const UBIOMMUOps *iommu_ops; ++ void *iommu_opaque; + }; + + #define TYPE_UB_BUS "UB_BUS" +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index de68df91a3..4669c070d6 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -411,7 +411,10 @@ void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); + #endif + + void kvm_arch_init_irq_routing(KVMState *s); +- ++#ifdef CONFIG_UB ++int kvm_arch_fixup_usi_route(struct kvm_irq_routing_entry *route, ++ uint64_t address, uint32_t data, UBDevice *dev); ++#endif // CONFIG_UB + int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev); + +@@ -513,6 +516,10 @@ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); + int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg, + PCIDevice *dev); + void kvm_irqchip_commit_routes(KVMState *s); ++#ifdef CONFIG_UB ++int kvm_irqchip_add_usi_route(KVMRouteChange *c, USIMessage msg, uint32_t devid, UBDevice *udev); ++int kvm_irqchip_update_usi_route(KVMRouteChange *c, int virq, USIMessage msg, UBDevice *udev); ++#endif // CONFIG_UB + + static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s) + { +diff --git a/target/arm/kvm.c b/target/arm/kvm.c +index 50f22717ec..1bb6e332e7 100644 +--- a/target/arm/kvm.c ++++ b/target/arm/kvm.c +@@ -36,6 +36,9 @@ + #include "hw/irq.h" + #include "qapi/visitor.h" + #include "qemu/log.h" ++#ifdef CONFIG_UB ++#include "hw/ub/ub.h" ++#endif // CONFIG_UB + + /* + * SMMCC KVM Vendor hypercall definitions. 
+@@ -1402,6 +1405,44 @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level) + return kvm_set_irq(kvm_state, kvm_irq, !!level); + } + ++#ifdef CONFIG_UB ++int kvm_arch_fixup_usi_route(struct kvm_irq_routing_entry *route, ++ uint64_t address, uint32_t data, UBDevice *dev) ++{ ++ AddressSpace *as = ub_device_iommu_address_space(dev); ++ hwaddr xlat, len, doorbell_gpa; ++ MemoryRegionSection mrs; ++ MemoryRegion *mr; ++ ++ if (as == &address_space_memory) { ++ return 0; ++ } ++ ++ /* USI doorbell address is translated by an IOMMU */ ++ RCU_READ_LOCK_GUARD(); ++ mr = address_space_translate(as, address, &xlat, &len, true, ++ MEMTXATTRS_UNSPECIFIED); ++ if (!mr) { ++ qemu_log("address space translate address(0x%lx) failed.\n", address); ++ return 1; ++ } ++ ++ mrs = memory_region_find(mr, xlat, 1); ++ if (!mrs.mr) { ++ qemu_log("mr failed to find mrs.\n"); ++ return 1; ++ } ++ ++ doorbell_gpa = mrs.offset_within_address_space; ++ memory_region_unref(mrs.mr); ++ qemu_log("IOVA(0x%lx) trans to GPA(0x%lx) by iommu success.\n", address, doorbell_gpa); ++ route->u.msi.address_lo = doorbell_gpa; ++ route->u.msi.address_hi = doorbell_gpa >> 32; ++ ++ return 0; ++} ++#endif // CONFIG_UB ++ + int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) + { +-- +2.33.0 + diff --git a/ub-add-lock-for-bus-instance.patch b/ub-add-lock-for-bus-instance.patch new file mode 100644 index 0000000000000000000000000000000000000000..73118ea95d094e35a0f0aabb976a2f7c70af9add --- /dev/null +++ b/ub-add-lock-for-bus-instance.patch @@ -0,0 +1,210 @@ +From 508b115a45d126222ad87924012a3e00d411f3b1 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 21:18:30 +0800 +Subject: [PATCH 6/6] ub: add lock for bus instance + +bus instances must not be duplicated among virtual machines, and this +duplication is prevented through file lock + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_common.c | 13 ++++++- + hw/ub/ub_ubc.c | 72 +++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_common.h | 2 + + include/hw/ub/ubus_instance.h | 40 +++++++++++++++++++ + 4 files changed, 126 insertions(+), 1 deletion(-) + create mode 100644 include/hw/ub/ubus_instance.h + +diff --git a/hw/ub/ub_common.c b/hw/ub/ub_common.c +index 3f8dff2a45..368d420463 100644 +--- a/hw/ub/ub_common.c ++++ b/hw/ub/ub_common.c +@@ -86,4 +86,15 @@ uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe) + ub_set_long(s->msgq_reg + CQ_PI, ++pi % depth); + + return pi; +-} +\ No newline at end of file ++} ++ ++bool ub_guid_is_none(UbGuid *guid) ++{ ++ if (guid->seq_num == 0 && ++ guid->device_id == 0 && guid->version == 0 && ++ guid->type == 0 && guid->vendor == 0) { ++ return true; ++ } ++ ++ return false; ++} +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index 0d5a31a22a..0fc5255c05 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -32,6 +32,7 @@ + #include "hw/ub/hisi/ub_mem.h" + #include "hw/ub/hisi/ub_fm.h" + #include "migration/vmstate.h" ++#include "hw/ub/ubus_instance.h" + + static uint64_t ub_msgq_reg_read(void *opaque, hwaddr addr, unsigned len) + { +@@ -157,6 +158,7 @@ static void ub_bus_controller_realize(DeviceState *dev, Error **errp) + g_free(name); + } + ++static void ub_bus_instance_guid_unlock(BusControllerDev *ubc_dev); + static void ub_bus_controller_unrealize(DeviceState *dev) + { + BusControllerState *s = BUS_CONTROLLER(dev); +@@ -165,6 +167,7 @@ static void ub_bus_controller_unrealize(DeviceState *dev) + QLIST_REMOVE(s, node); + ub_unregister_root_bus(s->bus); + 
ub_reg_free(dev); ++ ub_bus_instance_guid_unlock(s->ubc_dev); + } + + static bool ub_bus_controller_needed(void *opaque) +@@ -409,6 +412,68 @@ static bool ub_ubc_is_empty(UBBus *bus) + return true; + } + ++#define UB_BUSINSTANCE_GUID_LOCK_DIR "/run/libvirt/qemu" ++ ++static int ub_bus_instance_guid_lock(UbGuid *guid) ++{ ++ char path[256] = {0}; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ int lock_fd; ++ ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ snprintf(path, sizeof(path), "%s/ub-bus-instance-%s.lock", UB_BUSINSTANCE_GUID_LOCK_DIR, guid_str); ++ lock_fd = open(path, O_RDONLY | O_CREAT, 0600); ++ if (lock_fd < 0) { ++ qemu_log("failed to open lock file %s: %s\n", path, strerror(errno)); ++ return -1; ++ } ++ ++ if (flock(lock_fd, LOCK_EX | LOCK_NB)) { ++ qemu_log("lock %s failed: %s\n", path, strerror(errno)); ++ close(lock_fd); ++ return -1; ++ } ++ ++ return lock_fd; ++} ++ ++static void ub_bus_instance_guid_unlock(BusControllerDev *ubc_dev) ++{ ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ ++ if (ubc_dev->bus_instance_lock_fd <= 0) { ++ return; ++ } ++ ++ ub_device_get_str_from_guid(&ubc_dev->bus_instance_guid, guid_str, ++ UB_DEV_GUID_STRING_LENGTH + 1); ++ qemu_log("unlock ub bus instance lock for guid: %s\n", guid_str); ++ if (flock(ubc_dev->bus_instance_lock_fd, LOCK_UN)) { ++ qemu_log("failed to unlock for bus instance guid %s: %s\n", ++ guid_str, strerror(errno)); ++ } ++ close(ubc_dev->bus_instance_lock_fd); ++} ++ ++static int ub_bus_instance_process(BusControllerDev *ubc_dev, Error **errp) ++{ ++ int lock_fd; ++ ++ if (ub_guid_is_none(&ubc_dev->bus_instance_guid)) { ++ error_setg(errp, "ubc bus instance guid is required"); ++ return -1; ++ } ++ ++ lock_fd = ub_bus_instance_guid_lock(&ubc_dev->bus_instance_guid); ++ if (lock_fd < 0) { ++ error_setg(errp, "ubc bus instance guid lock failed, it may used by other vm"); ++ return -1; ++ } ++ ++ ubc_dev->bus_instance_lock_fd = lock_fd; ++ return 0; ++} ++ + static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp) + { + UBBus *bus = UB_BUS(qdev_get_parent_bus(DEVICE(dev))); +@@ -437,6 +502,13 @@ static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp) + if (0 > ummu_associating_with_ubc(ubc)) { + qemu_log("failed to associating ubc with ummu. %s\n", dev->name); + } ++ ++ qemu_log("set type UB_TYPE_CONTROLLER, ubc %p, " ++ "ubc->ubc_dev %p, bus %p\n", ubc, ubc->ubc_dev, bus); ++ if (ub_bus_instance_process(ubc->ubc_dev, errp)) { ++ qemu_log("ub bus instance process failed\n"); ++ return; ++ } + } + + static Property ub_bus_controller_dev_properties[] = { +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +index 840052931e..440a5bacdf 100644 +--- a/include/hw/ub/ub_common.h ++++ b/include/hw/ub/ub_common.h +@@ -416,4 +416,6 @@ typedef struct MsgPktHeader { /* TODO, check byte order */ + uint32_t fill_rq(BusControllerState *s, void *rsp, uint32_t rsp_size); + uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe); + ++bool ub_guid_is_none(UbGuid *guid); ++ + #endif +diff --git a/include/hw/ub/ubus_instance.h b/include/hw/ub/ubus_instance.h +new file mode 100644 +index 0000000000..deb6353ed8 +--- /dev/null ++++ b/include/hw/ub/ubus_instance.h +@@ -0,0 +1,40 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_BUS_INSTANCE_H ++#define UB_BUS_INSTANCE_H ++ ++#define UBUS_INSTANCE_UNKNOW (-1) ++#define UBUS_INSTANCE_STATIC_SERVER 0 ++#define UBUS_INSTANCE_STATIC_CLUSTER 1 ++#define UBUS_INSTANCE_DYNAMIC_SERVER 2 ++#define UBUS_INSTANCE_DYNAMIC_CLUSTER 3 ++ ++#define UBUS_INSTANCE_IS_STATIC_SERVER(type) (type == UBUS_INSTANCE_STATIC_SERVER) ++#define UBUS_INSTANCE_IS_STATIC_CLUSTER(type) (type == UBUS_INSTANCE_STATIC_CLUSTER) ++#define UBUS_INSTANCE_IS_DYNAMIC_SERVER(type) (type == UBUS_INSTANCE_DYNAMIC_SERVER) ++#define UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type) (type == UBUS_INSTANCE_DYNAMIC_CLUSTER) ++#define UBUS_INSTANCE_IS_STATIC(type) \ ++ (UBUS_INSTANCE_IS_STATIC_SERVER(type) || UBUS_INSTANCE_IS_STATIC_CLUSTER(type)) ++#define UBUS_INSTANCE_IS_DYNAMIC(type) \ ++ (UBUS_INSTANCE_IS_DYNAMIC_SERVER(type) || UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type)) ++#define UBUS_INSTANCE_IS_SERVER(type) \ ++ (UBUS_INSTANCE_IS_STATIC_SERVER(type) || UBUS_INSTANCE_IS_DYNAMIC_SERVER(type)) ++#define UBUS_INSTANCE_IS_CLUSTER(type) \ ++ (UBUS_INSTANCE_IS_STATIC_CLUSTER(type) || UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type)) ++ ++#endif +-- +2.33.0 + diff --git a/ub-add-mem-reserve-help-function.patch b/ub-add-mem-reserve-help-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..04ef1d5eccfce857efcd7b1f7d1ed49425c66301 --- /dev/null +++ b/ub-add-mem-reserve-help-function.patch @@ -0,0 +1,64 @@ +From 45dfd6c97c281969ee9339db56a971fa9e64888c Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 11:47:49 +0800 +Subject: [PATCH 4/7] ub: add mem reserve help function + +add help function for mem reserve, this prepare for acpi report later + +Signed-off-by: caojinhuahw +--- + include/exec/memory.h | 6 ++++++ + system/memory.c | 20 ++++++++++++++++++++ + 2 files changed, 26 insertions(+) + +diff --git a/include/exec/memory.h b/include/exec/memory.h +index c5edf864e1..89d676e049 100644 +--- a/include/exec/memory.h ++++ b/include/exec/memory.h +@@ -269,6 +269,12 @@ extern uint64_t virtcca_cvm_gpa_start; + + extern uint64_t virtcca_cvm_ram_size; + ++void memory_region_add_reservation_with_ram(MemoryRegion *mr, ++ Object *owner, ++ const char *name, ++ hwaddr offset, ++ uint64_t size); ++ + static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, + IOMMUNotifierFlag flags, + hwaddr start, hwaddr end, +diff --git a/system/memory.c b/system/memory.c +index bf331d0e7b..e663467aa0 100644 +--- a/system/memory.c ++++ b/system/memory.c +@@ -3727,6 +3727,26 @@ void memory_region_init_rom_device(MemoryRegion *mr, + vmstate_register_ram(mr, owner_dev); + } + ++void memory_region_add_reservation_with_ram(MemoryRegion *mr, ++ Object *owner, ++ const char *name, ++ hwaddr offset, ++ uint64_t size) ++{ ++ Error *local_err = NULL; ++ uint32_t ram_flags = 0; ++ MemoryRegion *resved = g_malloc(sizeof(*resved)); ++ char *mrname = 
g_strdup_printf("%s-reservedmemory", name ? name : "(none)"); ++ ++ memory_region_init_ram_flags_nomigrate(resved, owner, mrname, size, ram_flags, &local_err); ++ ++ memory_region_add_subregion(mr, offset, resved); ++ g_free(mrname); ++ if (local_err) { ++ error_report_err(local_err); ++ } ++} ++ + /* + * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for + * the fuzz_dma_read_cb callback +-- +2.33.0 + diff --git a/ub-add-qapi-query-ub-for-get-ub-info.patch b/ub-add-qapi-query-ub-for-get-ub-info.patch new file mode 100644 index 0000000000000000000000000000000000000000..398fa5a2d94b657368bf1bcb21fbec3c230e1dce --- /dev/null +++ b/ub-add-qapi-query-ub-for-get-ub-info.patch @@ -0,0 +1,207 @@ +From 4e580efbaf3e09994f39356268647d76fa1bddaf Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 15:41:33 +0800 +Subject: [PATCH 4/7] ub: add qapi query-ub for get ub info + +support qapi query-ub for get ub info + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 59 ++++++++++++++++++++++++++++++++ + qapi/meson.build | 5 +++ + qapi/qapi-schema.json | 1 + + qapi/ub.json | 79 +++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 144 insertions(+) + create mode 100644 qapi/ub.json + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 23e1279cf4..6377005d9f 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -34,6 +34,10 @@ + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" + #include "migration/vmstate.h" ++#include "qapi/qapi-commands-ub.h" ++#include "qapi/error.h" ++#include "qapi/util.h" ++#include "qapi/qmp/qstring.h" + #include "exec/address-spaces.h" + #include "hw/ub/ubus_instance.h" + #include "monitor/monitor.h" +@@ -782,6 +786,61 @@ bool ub_device_get_guid_from_str(UbGuid *guid, char *guid_str) + return true; + } + ++static UBDeviceInfo *qmp_query_ub_device(UBDevice *dev) ++{ ++ UBDeviceInfo *info; ++ /* will be freed by qmp framework */ ++ char *guid_str = g_malloc0(UB_DEV_GUID_STRING_LENGTH + 1); ++ ++ info = g_new0(UBDeviceInfo, 1); ++ info->bi = dev->bus_instance_eid; ++ info->eid = dev->eid; ++ info->type = dev->dev_type; ++ info->name = g_strdup(dev->name); ++ info->id = g_strdup(dev->qdev.id); ++ info->cna = dev->cna; ++ info->feidx = dev->ue_idx; ++ ub_device_get_str_from_guid(&dev->guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ info->guid = guid_str; ++ info->ports = dev->port.port_num; ++ info->usis = dev->usi_entries_nr; ++ return info; ++} ++ ++static UBDeviceInfoList *qmp_query_ub_devices(UBBus *bus) ++{ ++ UBDeviceInfoList *head = NULL, **tail = &head; ++ UBDevice *dev; ++ ++ QLIST_FOREACH(dev, &bus->devices, node) { ++ QAPI_LIST_APPEND(tail, qmp_query_ub_device(dev)); ++ } ++ ++ return head; ++} ++ ++static UBInfo *qmp_query_ub_bus(UBBus *bus) ++{ ++ UBInfo *info = NULL; ++ info = g_malloc0(sizeof(*info)); ++ info->devices = qmp_query_ub_devices(bus); ++ ++ return info; ++} ++ ++UBInfoList *qmp_query_ub(Error **errp) ++{ ++ UBInfoList *head = NULL, **tail = &head; ++ BusControllerState *ubc = NULL; ++ ++ QLIST_FOREACH(ubc, &ub_bus_controllers, node) { ++ QAPI_LIST_APPEND(tail, ++ qmp_query_ub_bus(ubc->bus)); ++ } ++ ++ return head; ++} ++ + /* container_of cannot be used here because 'bus' is a pointer member. 
*/ + BusControllerState *container_of_ubbus(UBBus *bus) + { +diff --git a/qapi/meson.build b/qapi/meson.build +index f81a37565c..7cf176eb6e 100644 +--- a/qapi/meson.build ++++ b/qapi/meson.build +@@ -66,6 +66,11 @@ if have_system + 'tpm', + ] + endif ++if have_ub ++ qapi_all_modules += [ ++ 'ub', ++ ] ++endif + if have_system or have_tools + qapi_all_modules += [ + 'ui', +diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json +index c01ec335e6..457e2190c4 100644 +--- a/qapi/qapi-schema.json ++++ b/qapi/qapi-schema.json +@@ -80,3 +80,4 @@ + { 'include': 'virtio.json' } + { 'include': 'cryptodev.json' } + { 'include': 'cxl.json' } ++{ 'include': 'ub.json' } +diff --git a/qapi/ub.json b/qapi/ub.json +new file mode 100644 +index 0000000000..bd74b46a9c +--- /dev/null ++++ b/qapi/ub.json +@@ -0,0 +1,79 @@ ++# -*- Mode: Python -*- ++# vim: filetype=python ++# ++# This work is licensed under the terms of the GNU GPL, version 2 or later. ++# See the COPYING file in the top-level directory. ++# SPDX-License-Identifier: GPL-2.0-or-later ++ ++## ++# = UB ++## ++ ++ ++## ++# @UBDeviceInfo: ++# ++# Information about a UB device ++# ++# @bi: the bus instance of the device ++# ++# @eid: the eid of the device ++# ++# @name: the name of the device ++# ++# @id: the id of the device ++# ++# @type: the type of the device ++# ++# @cna: the cna of the device ++# ++# @feidx: the FE Index of the device ++# ++# @guid: the guid of the device ++# ++# @usis: the usi entries num of the device ++# ++# @ports: the total ports of the device ++# ++# Since: 0.14 ++# ++## ++ ++{ 'struct': 'UBDeviceInfo', ++ 'data': {'bi': 'int', 'eid': 'int', 'name': 'str', 'id':'str', 'type':'int', ++ 'cna':'int', 'feidx':'int', 'guid':'str', 'usis':'int', 'ports':'int'}} ++ ++ ++ ++## ++# @UBInfo: ++# ++# Information about a UB bus ++# ++# @bus: the bus index ++# ++# @devices: a list of devices on this bus ++# ++# Since: 0.14 ++# ++## ++ ++{ 'struct': 'UBInfo', 'data': {'bus': 'int', 'devices': ['UBDeviceInfo']}} ++ ++## ++# @query-ub: ++# ++# Return information about the UB bus topology of the guest. ++# ++# Features: ++# ++# @unstable: This command is meant for debugging. ++# ++# Returns: a list of @UBInfo for each UB bus. Each bus is ++# represented by a json-object, which has a key with a json-array of ++# all PCI devices attached to it. Each device is represented by a ++# json-object. 
++## ++{ 'command': 'query-ub', 'returns': ['UBInfo'], ++ 'if': 'CONFIG_UB', ++ 'features': [ 'unstable' ] } +\ No newline at end of file +-- +2.33.0 + diff --git a/ub-add-some-common-function-for-later-vfio-ub-realiz.patch b/ub-add-some-common-function-for-later-vfio-ub-realiz.patch new file mode 100644 index 0000000000000000000000000000000000000000..69b7cccad601cc6c0d80cb6b019e0b8a2c079ab5 --- /dev/null +++ b/ub-add-some-common-function-for-later-vfio-ub-realiz.patch @@ -0,0 +1,355 @@ +From c5c096482d18dbedad5143ee71834553e19fce79 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 21:44:24 +0800 +Subject: [PATCH 1/5] ub: add some common function for later vfio-ub realize + +this is prepare for later vfio ub device realize + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_common.c | 298 ++++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_common.h | 8 +- + 2 files changed, 305 insertions(+), 1 deletion(-) + +diff --git a/hw/ub/ub_common.c b/hw/ub/ub_common.c +index 368d420463..f8ef12fb02 100644 +--- a/hw/ub/ub_common.c ++++ b/hw/ub/ub_common.c +@@ -19,6 +19,7 @@ + #include "qemu/log.h" + #include "qapi/error.h" + #include "hw/ub/ub_common.h" ++#include "hw/ub/ubus_instance.h" + #include "sysemu/dma.h" + + /* tmp for vfio-ub run with stub, remove later */ +@@ -88,6 +89,280 @@ uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe) + return pi; + } + ++#define UB_MEM_DUMP_MAX_STR_LEN 4096 ++#define UB_MEM_DUMP_COLUMN 4 ++#define UB_MEM_DUMP_WIDTH 36 ++#define UB_MEM_DUMP_MAX_BYTES 2048 ++#define UB_HEXDUMP_TITLE " ↓0x0 ↓0x4 ↓0x8 ↓0xC\n" ++int ub_hexdump(void *data, int offset, int len, char *buff, int buff_size) ++{ ++ size_t l = 0; ++ size_t tmp; ++ int dw = len / sizeof(uint32_t) + !!(len % sizeof(uint32_t)); ++ int dw_round_up = ROUND_UP(dw, UB_MEM_DUMP_COLUMN); ++ int i; ++ void *real_data = data + offset; ++ char str_addr[64] = {0}; ++ int width; ++ bool last_line_all_0 = 1; ++ int cnt_line_all_0 = 0; ++ size_t last_line_cnt_character = 0; ++ g_autofree char *line = line_generator(UB_MEM_DUMP_WIDTH); ++ g_autofree char *line_head = g_strdup_printf("┌%s┐", line); ++ g_autofree char *line_tail = g_strdup_printf("└%s┘", line); ++ g_autofree char *line_zero = g_strdup_printf("%-*s", ++ UB_MEM_DUMP_WIDTH + 3, "│"); ++ ++ if (!line || !line_head || !line_tail || !line_zero) { ++ qemu_log("failed to alloc mem %p %p %p %p\n", ++ line, line_head, line_tail, line_zero); ++ return -1; ++ } ++ ++ if (buff_size < strlen(line_head) + strlen(line_tail) + ++ strlen(UB_HEXDUMP_TITLE) + dw_round_up * 8) { ++ qemu_log("buff too small %d %d %ld\n", ++ buff_size, len, strlen(line_head) + strlen(line_tail) + ++ strlen(UB_HEXDUMP_TITLE) + dw_round_up * 8); ++ return -1; ++ } ++ snprintf(str_addr, sizeof(str_addr), "0x%x", offset + len); ++ width = (int)strlen(str_addr); ++ l += snprintf(buff + l, buff_size - l, "\n%*s%s", ++ width, " ", UB_HEXDUMP_TITLE); ++ l += snprintf(buff + l, buff_size - l, "%*s%s", ++ width, " ", line_head); ++ for (i = 0; i < dw_round_up; i++) { ++ if (i >= dw) { ++ l += snprintf(buff + l, buff_size - l, " %8s", " "); ++ } else { ++ if ((i % UB_MEM_DUMP_COLUMN) != 0) { ++ tmp = snprintf(buff + l, buff_size - l, " %.8x", ++ *((uint32_t *)real_data + i)); ++ l += tmp; ++ last_line_all_0 &= !(*((uint32_t *)real_data + i)); ++ last_line_cnt_character += tmp; ++ } else { ++ if (last_line_all_0 && last_line_cnt_character) { ++ cnt_line_all_0++; ++ if (cnt_line_all_0 == 2) { ++ l -= last_line_cnt_character; ++ l += snprintf(buff + l, buff_size - l, ++ "│\n%*s%s", width, 
"...", ++ line_zero); ++ } else if (cnt_line_all_0 > 2) { ++ l -= last_line_cnt_character; ++ } ++ } else { ++ cnt_line_all_0 = 0; ++ } ++ snprintf(str_addr, sizeof(str_addr), "0x%lx", ++ offset + i * sizeof(uint32_t)); ++ tmp = snprintf(buff + l, buff_size - l, "%s\n%*s│ %.8x", ++ i == 0 ? "" : "│", width, str_addr, ++ *((uint32_t *)real_data + i)); ++ l += tmp; ++ last_line_all_0 = !(*((uint32_t *)real_data + i)); ++ last_line_cnt_character = tmp; ++ } ++ } ++ } ++ l += snprintf(buff + l, buff_size - l, "│\n%*s%s\n", ++ width, " ", line_tail); ++ return 0; ++} ++ ++void ub_mem_dump(void *start, int size, const char *tag_fmt, ...) ++{ ++ va_list ap; ++ char str[UB_MEM_DUMP_MAX_STR_LEN] = {0}; ++ size_t l; ++ ++ /* get mem tag info */ ++ va_start(ap, tag_fmt); ++ l = vsnprintf(str, sizeof(str), tag_fmt, ap); ++ va_end(ap); ++ ++ if (size > UB_MEM_DUMP_MAX_BYTES) { ++ qemu_log("%s execeed max len %d\n", ++ str, UB_MEM_DUMP_MAX_BYTES); ++ return; ++ } ++ ++ if (ub_hexdump(start, 0, size, str + l, sizeof(str) - l) < 0) { ++ qemu_log("failed to dump memory. %s\n", str); ++ return; ++ } ++ qemu_log("%s", str); ++} ++ ++/* get interrupt_id from sysfs, not found will return UINT32_MAX */ ++#define MAX_BUF_LENGTH 1024 ++uint32_t sysfs_get_dev_number_by_guid(UbGuid *guid) ++{ ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ uint32_t id = UINT32_MAX; ++ const char *ub_sysfs_devices = "/sys/bus/ub/devices"; ++ struct dirent *entry; ++ DIR *dir = NULL; ++ ++ dir = opendir(ub_sysfs_devices); ++ if (!dir) { ++ qemu_log("failed to opendir %s\n", ub_sysfs_devices); ++ return UINT32_MAX; ++ } ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ ++ while ((entry = readdir(dir)) != NULL) { ++ char file_path[MAX_BUF_LENGTH] = {0}; /* guid file path */ ++ char guid_buffer[MAX_BUF_LENGTH] = {0}; /* guid that read from file */ ++ FILE *file = NULL; ++ size_t bytes_read; ++ ++ /* skip the stumbling blocks */ ++ if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) { ++ continue; ++ } ++ snprintf(file_path, sizeof(file_path), "%s/%s/guid", ++ ub_sysfs_devices, entry->d_name); ++ file = fopen(file_path, "r"); ++ if (file == NULL) { ++ qemu_log("failed to open %s\n", file_path); ++ closedir(dir); ++ return UINT32_MAX; ++ } ++ ++ bytes_read = fread(guid_buffer, 1, MAX_BUF_LENGTH - 1, file); ++ fclose(file); ++ guid_buffer[bytes_read] = '\0'; ++ /* discard annoying line breaks */ ++ if (bytes_read > 0 && guid_buffer[bytes_read - 1] == '\n') { ++ guid_buffer[bytes_read - 1] = '\0'; ++ } ++ ++ /* check if it's a long-awaited true love */ ++ if (strcmp(guid_buffer, guid_str) == 0) { ++ sscanf(entry->d_name, "%x", &id); ++ closedir(dir); ++ return id; ++ } ++ } ++ closedir(dir); ++ return id; ++} ++ ++uint32_t sysfs_get_ub_device_bus_instance_eid(char *sysfsdev) ++{ ++ FILE *f = NULL; ++ char *path = NULL; ++ char bus_instance_eid_buf[MAX_BUF_LENGTH] = {0}; ++ uint32_t eid = UINT32_MAX; ++ ++ path = g_strdup_printf("%s/instance", sysfsdev); ++ f = fopen(path, "r"); ++ if (!f) { ++ qemu_log("failed to open file:%s\n", path); ++ g_free(path); ++ return eid; ++ } ++ ++ if (fgets(bus_instance_eid_buf, MAX_BUF_LENGTH, f) != NULL) { ++ sscanf(bus_instance_eid_buf, "%x", &eid); ++ qemu_log("sysfs(%s) get bus instance eid: 0x%x.\n", sysfsdev, eid); ++ } ++ ++ if (eid == UINT32_MAX) { ++ qemu_log("cannot get bus instance eid: %s.\n", sysfsdev); ++ } ++ ++ fclose(f); ++ g_free(path); ++ return eid; ++} ++ ++uint32_t sysfs_get_bus_instance_eid_by_guid(UbGuid *guid) ++{ ++ FILE 
*file = NULL; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ uint32_t eid = UINT32_MAX; ++ char line[MAX_BUF_LENGTH] = {0}; ++ bool found = false; ++ ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ file = fopen("/sys/bus/ub/instance", "r"); ++ if (file == NULL) { ++ qemu_log("failed to open /sys/bus/ub/instance\n"); ++ return eid; ++ } ++ ++ while (fgets(line, sizeof(line), file) != NULL) { ++ if (strstr(line, guid_str) != NULL) { ++ found = true; ++ break; ++ } ++ } ++ fclose(file); ++ ++ if (found) { ++ /* 0 1 2 3 ++ * /sys/bus/ub/instance: guid:xxx type:xxx eid:xxx upi:xxx ++ */ ++ g_autofree char **eid_str = g_strsplit(line, " ", 4); ++ if (eid_str && eid_str[2]) { ++ sscanf(eid_str[2], "eid:%05x", &eid); ++ qemu_log("find ubus instance eid 0x%x by guid %s\n", eid, guid_str); ++ } ++ } ++ ++ if (eid == UINT32_MAX) { ++ qemu_log("can not find instance eid by guid %s.\n", guid_str); ++ } ++ ++ return eid; ++} ++ ++uint32_t sysfs_get_bus_instance_type_by_eid(uint32_t eid) ++{ ++ FILE *file = NULL; ++ char *eid_str = NULL; ++ char line[MAX_BUF_LENGTH] = {0}; ++ int bus_instance_type = UBUS_INSTANCE_UNKNOW; ++ bool found = false; ++ ++ file = fopen("/sys/bus/ub/instance", "r"); ++ if (file == NULL) { ++ qemu_log("failed to open /sys/bus/ub/instance\n"); ++ return bus_instance_type; ++ } ++ ++ eid_str = g_strdup_printf("eid:%05x", eid); ++ while (fgets(line, sizeof(line), file) != NULL) { ++ if (strstr(line, eid_str) != NULL) { ++ found = true; ++ break; ++ } ++ } ++ g_free(eid_str); ++ fclose(file); ++ ++ if (found) { ++ /* 0 1 2 3 ++ * /sys/bus/ub/instance: guid:xxx type:xxx eid:xxx upi:xxx ++ */ ++ g_autofree char **type = g_strsplit(line, " ", 4); ++ if (type && type[1]) { ++ sscanf(type[1] + strlen("type:"), "%d", &bus_instance_type); ++ qemu_log("bus instance eid(0x%x) type is %d.\n", eid, bus_instance_type); ++ } ++ } ++ ++ if (bus_instance_type == UBUS_INSTANCE_UNKNOW) { ++ qemu_log("can not get bus instance type by eid: 0x%x\n", eid); ++ } ++ ++ return bus_instance_type; ++} ++ + bool ub_guid_is_none(UbGuid *guid) + { + if (guid->seq_num == 0 && +@@ -98,3 +373,26 @@ bool ub_guid_is_none(UbGuid *guid) + + return false; + } ++ ++/* The caller is responsible for free memory. */ ++char *line_generator(uint8_t len) ++{ ++ char *line = NULL; ++ int i, j; ++ if (!len) { ++ qemu_log("invalid len %d", len); ++ return NULL; ++ } ++ ++ line = g_malloc0(len * DASH_SZ + 1); ++ if (!line) { ++ qemu_log("failed to alloc mem %d", len * DASH_SZ + 1); ++ return NULL; ++ } ++ for (i = 0, j = 0; i < len; i++) { ++ line[j++] = '\xE2'; ++ line[j++] = '\x80'; ++ line[j++] = '\x94'; ++ } ++ return line; ++} +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +index 440a5bacdf..0af352c2bb 100644 +--- a/include/hw/ub/ub_common.h ++++ b/include/hw/ub/ub_common.h +@@ -415,7 +415,13 @@ typedef struct MsgPktHeader { /* TODO, check byte order */ + + uint32_t fill_rq(BusControllerState *s, void *rsp, uint32_t rsp_size); + uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe); +- ++/* get eid from sysfs, not found will return UINT32_MAX */ ++uint32_t sysfs_get_dev_number_by_guid(UbGuid *guid); ++uint32_t sysfs_get_ub_device_bus_instance_eid(char *sysfsdev); ++uint32_t sysfs_get_bus_instance_type_by_eid(uint32_t eid); ++uint32_t sysfs_get_bus_instance_eid_by_guid(UbGuid *guid); ++void ub_mem_dump(void *start, int size, const char *tag_fmt, ...) 
__attribute__((format(printf, 3, 4))); ++int ub_hexdump(void *data, int offset, int len, char *buff, int buff_size); + bool ub_guid_is_none(UbGuid *guid); + + #endif +-- +2.33.0 + diff --git a/ub-add-ummu-base-framework.patch b/ub-add-ummu-base-framework.patch new file mode 100644 index 0000000000000000000000000000000000000000..ba0c911a627a9226bfbc87c07378fd11eff3a6f2 --- /dev/null +++ b/ub-add-ummu-base-framework.patch @@ -0,0 +1,1272 @@ +From f2b1575f0f315e09dbf1ad5b8ccdba6839665c2e Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 09:14:36 +0800 +Subject: [PATCH 1/6] ub: add ummu base framework +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1。realize ummu device framework +2. define some ummu registers +3. prepare some ummu inline help function for later ummu realize + +Signed-off-by: caojinhuahw +--- + hw/ub/meson.build | 1 + + hw/ub/ub_ummu.c | 83 ++++ + hw/ub/ub_ummu_internal.h | 958 ++++++++++++++++++++++++++++++++++++++ + include/hw/ub/hisi/ummu.h | 22 + + include/hw/ub/ub_ummu.h | 93 ++++ + include/hw/ub/ub_usi.h | 27 ++ + include/qemu/typedefs.h | 2 + + 7 files changed, 1186 insertions(+) + create mode 100644 hw/ub/ub_ummu.c + create mode 100644 hw/ub/ub_ummu_internal.h + create mode 100644 include/hw/ub/hisi/ummu.h + create mode 100644 include/hw/ub/ub_usi.h + +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index d629174ef8..400fa553d8 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -2,6 +2,7 @@ ub_ss = ss.source_set() + ub_ss.add(files( + 'ub.c', + 'ub_ubc.c', ++ 'ub_ummu.c', + 'ub_config.c', + 'ub_acpi.c', + 'ub_enum.c', +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +new file mode 100644 +index 0000000000..8598e2272c +--- /dev/null ++++ b/hw/ub/ub_ummu.c +@@ -0,0 +1,83 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "qemu/log.h" ++#include "qemu/module.h" ++#include "hw/arm/virt.h" ++#include "hw/qdev-properties.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/hisi/ummu.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_ummu.h" ++#include "hw/ub/ub_config.h" ++#include "hw/ub/hisi/ubc.h" ++#include "migration/vmstate.h" ++#include "ub_ummu_internal.h" ++#include "sysemu/dma.h" ++#include "hw/arm/mmu-translate-common.h" ++#include "hw/ub/ub_ubc.h" ++#include "qemu/error-report.h" ++#include "trace.h" ++ ++static void ummu_base_realize(DeviceState *dev, Error **errp) ++{ ++} ++ ++static void ummu_base_unrealize(DeviceState *dev) ++{ ++} ++ ++static void ummu_base_reset(DeviceState *dev) ++{ ++ /* reset ummu relative struct later */ ++} ++ ++static Property ummu_dev_properties[] = { ++ DEFINE_PROP_UINT64("ub-ummu-reg-size", UMMUState, ++ ummu_reg_size, 0), ++ DEFINE_PROP_LINK("primary-bus", UMMUState, primary_bus, ++ TYPE_UB_BUS, UBBus *), ++ DEFINE_PROP_BOOL("nested", UMMUState, nested, false), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++static void ummu_base_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ ++ device_class_set_props(dc, ummu_dev_properties); ++ dc->realize = ummu_base_realize; ++ dc->unrealize = ummu_base_unrealize; ++ dc->reset = ummu_base_reset; ++} ++ ++static const TypeInfo ummu_base_info = { ++ .name = TYPE_UB_UMMU, ++ .parent = TYPE_SYS_BUS_DEVICE, ++ .instance_size = sizeof(UMMUState), ++ .class_data = NULL, ++ .class_size = sizeof(UMMUBaseClass), ++ .class_init = ummu_base_class_init, ++}; ++ ++static void ummu_base_register_types(void) ++{ ++ type_register_static(&ummu_base_info); ++} ++type_init(ummu_base_register_types) +diff --git a/hw/ub/ub_ummu_internal.h b/hw/ub/ub_ummu_internal.h +new file mode 100644 +index 0000000000..68724e5ce1 +--- /dev/null ++++ b/hw/ub/ub_ummu_internal.h +@@ -0,0 +1,958 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#ifndef UB_UMMU_INTERNAL_H ++#define UB_UMMU_INTERNAL_H ++#include "hw/registerfields.h" ++#include "hw/ub/ub_usi.h" ++#include "sysemu/dma.h" ++#include "sysemu/iommufd.h" ++#include ++ ++/* ummu spec register define */ ++REG32(CAP0, 0x0010) ++ FIELD(CAP0, DSTEID_SIZE, 0, 8) ++ FIELD(CAP0, TOKENID_SIZE, 8, 5) ++ FIELD(CAP0, ATTR_PERMS_OVR, 13, 1) ++ FIELD(CAP0, ATTR_TYPES_OVR, 14, 1) ++ FIELD(CAP0, S2_ATTR_TYPE, 15, 1) ++ FIELD(CAP0, TCT_LEVEL, 16, 1) ++ FIELD(CAP0, TECT_MODE, 17, 2) ++ FIELD(CAP0, TECT_LEVEL, 19, 1) ++ ++REG32(CAP1, 0x0014) ++ FIELD(CAP1, EVENTQ_SIZE, 0, 5) ++ FIELD(CAP1, EVENTQ_NUMB, 5, 4) ++ FIELD(CAP1, EVENTQ_SUPPORT, 9, 1) ++ FIELD(CAP1, MCMDQ_SIZE, 10, 4) ++ FIELD(CAP1, MCMDQ_NUMB, 14, 4) ++ FIELD(CAP1, MCMDQ_SUPPORT, 18, 1) ++ FIELD(CAP1, EVENT_GEN, 19, 1) ++ FIELD(CAP1, STALL_MAX, 20, 12) ++ ++REG32(CAP2, 0x0018) ++ FIELD(CAP2, VMID_TLBI, 0, 1) ++ FIELD(CAP2, TLB_BOARDCAST, 1, 1) ++ FIELD(CAP2, RANGE_TLBI, 2, 1) ++ FIELD(CAP2, OA_SIZE, 3, 3) ++ FIELD(CAP2, GRAN4K_T, 6, 1) ++ FIELD(CAP2, GRAN16K_T, 7, 1) ++ FIELD(CAP2, GRAN64K_T, 8, 1) ++ FIELD(CAP2, VA_EXTEND, 9, 2) ++ FIELD(CAP2, S2_TRANS, 11, 1) ++ FIELD(CAP2, S1_TRANS, 12, 1) ++ FIELD(CAP2, SMALL_TRANS, 13, 1) ++ FIELD(CAP2, TRANS_FORM, 14, 2) ++ ++REG32(CAP3, 0x001C) ++ FIELD(CAP3, HIER_ATTR_DISABLE, 0, 1) ++ FIELD(CAP3, S2_EXEC_NEVER_CTRL, 1, 1) ++ FIELD(CAP3, BBM_LEVEL, 2, 2) ++ FIELD(CAP3, COHERENT_ACCESS, 4, 1) ++ FIELD(CAP3, TTENDIAN_MODE, 5, 2) ++ FIELD(CAP3, MTM_SUPPORT, 7, 1) ++ FIELD(CAP3, HTTU_SUPPORT, 8, 2) ++ FIELD(CAP3, HYP_S1CONTEXT, 10, 1) ++ FIELD(CAP3, USI_SUPPORT, 11, 1) ++ FIELD(CAP3, STALL_MODEL, 12, 2) ++ FIELD(CAP3, TERM_MODEL, 14, 1) ++ FIELD(CAP3, SATI_MAX, 15, 6) ++ ++REG32(CAP4, 0x0020) ++ FIELD(CAP4, UCMDQ_UCPLQ_NUMB, 0, 8) ++ FIELD(CAP4, UCMDQ_SIZE, 8, 4) ++ FIELD(CAP4, UCPLQ_SIZE, 12, 4) ++ FIELD(CAP4, UIEQ_SIZE, 16, 4) ++ FIELD(CAP4, UIEQ_NUMB, 20, 4) ++ FIELD(CAP4, UIEQ_SUPPORT, 24, 1) ++ FIELD(CAP4, PPLB_SUPPORT, 25, 1) ++ ++REG32(CAP5, 0x0024) ++ FIELD(CAP5, MAPT_SUPPORT, 0, 1) ++ FIELD(CAP5, MAPT_MODE, 1, 2) ++ FIELD(CAP5, GRAN2M_P, 3, 1) ++ FIELD(CAP5, GRAN4K_P, 4, 1) ++ FIELD(CAP5, TOKENVAL_CHK, 5, 1) ++ FIELD(CAP5, TOKENVAL_CHK_MODE, 6, 2) ++ FIELD(CAP5, RANGE_PLBI, 8, 1) ++ FIELD(CAP5, PLB_BORDCAST, 9, 1) ++ ++REG32(CAP6, 0x0028) ++ FIELD(CAP6, MTM_ID_MAX, 0, 16) ++ FIELD(CAP6, MTM_GP_MAX, 16, 8) ++ ++#define UMMU_CTRL0_WMASK GENMASK(5, 0) ++REG32(CTRL0, 0x0030) ++ FIELD(CTRL0, UMMU_EN, 0, 1) ++ FIELD(CTRL0, EVENTQ_EN, 1, 1) ++ FIELD(CTRL0, VMID_WILDCARD_T, 2, 3) ++ FIELD(CTRL0, MAPT_EN, 5, 1) ++ ++REG32(CTRL0_ACK, 0x0034) ++ FIELD(CTRL0_ACK, UMMU_EN, 0, 1) ++ FIELD(CTRL0_ACK, EVENTQ_EN, 1, 1) ++ FIELD(CTRL0_ACK, VMID_WILDCARD_T, 2, 3) ++ FIELD(CTRL0_ACK, MAPT_EN, 5, 1) ++ ++#define UMMU_CTRL1_WMASK GENMASK(15, 0) ++REG32(CTRL1, 0x0038) ++ FIELD(CTRL1, QUEUE_IC_T, 0, 2) ++ FIELD(CTRL1, QUEUE_OC_T, 2, 2) ++ FIELD(CTRL1, QUEUE_SH_T, 4, 2) ++ FIELD(CTRL1, TABLE_IC_T, 6, 2) ++ FIELD(CTRL1, TABLE_OC_T, 8, 2) ++ FIELD(CTRL1, TABLE_SH_T, 10, 2) ++ FIELD(CTRL1, E2H, 12, 1) ++ FIELD(CTRL1, BAD_DSTEID_RECORD, 13, 1) ++ FIELD(CTRL1, PRIVATE_TLB, 14, 1) ++ FIELD(CTRL1, TECT_MODE_SEL, 15, 1) ++ ++#define UMMU_CTRL2_WMASK GENMASK(6, 0) ++REG32(CTRL2, 0x003C) ++ FIELD(CTRL2, PRIVATE_PLB, 6, 1) ++ FIELD(CTRL2, UIE_QUEUE_SH_P, 4, 2) ++ FIELD(CTRL2, UIE_QUEUE_OC_P, 2, 2) ++ FIELD(CTRL2, UIE_QUEUE_IC_P, 0, 2) ++ ++#define UMMU_CTRL3_WMASK (GENMASK(23, 0) | GENMASK(31, 31)) ++REG32(CTRL3, 0x0040) ++ FIELD(CTRL3, UPDATE_FLG, 31, 1) ++ FIELD(CTRL3, UOTR_MTM_GP, 16, 8) ++ FIELD(CTRL3, 
UOTR_MTM_ID, 0, 16) ++ ++#define UMMU_TECT_BASE_WMASK (GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63)) ++REG32(TECT_BASE0, 0x0070) ++ FIELD(TECT_BASE0, TECT_BASE_ADDR0, 6, 26) ++ ++REG32(TECT_BASE1, 0x0074) ++ FIELD(TECT_BASE1, TECT_BASE_ADDR1, 0, 19) ++ FIELD(TECT_BASE1, TECT_RA_CFG, 31, 1) ++ ++#define UMMU_TECT_BASE_CFG_WMASK GENMASK_ULL(12, 0) ++REG32(TECT_BASE_CFG, 0x0078) ++ FIELD(TECT_BASE_CFG, TECT_LOG2SIZE, 0, 6) ++ FIELD(TECT_BASE_CFG, TECT_SPLIT, 6, 5) ++ FIELD(TECT_BASE_CFG, TECT_FMT, 11, 2) ++ ++#define UMMU_MCMDQ_BASE_WMASK (GENMASK_ULL(51, 0) | GENMASK_ULL(63, 63)) ++#define UMMU_MCMDQ_PI_WMASK (GENMASK(19, 0) | GENMASK(23, 23) | GENMASK(31, 31)) ++#define UMMU_MCMDQ_CI_WMASK (GENMASK(19, 0) | GENMASK(26, 23) | GENMASK(31, 31)) ++#define A_MCMD_QUE_BASE 0x0100 ++#define A_MCMD_QUE_LASTEST_CI 0x10FC ++ ++#define UMMU_EVENTQ_BASE_WMASK (GENMASK_ULL(4, 0) | GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63)) ++REG32(EVENT_QUE_BASE0, 0x1100) ++ FIELD(EVENT_QUE_BASE0, EVENT_QUE_LOG2SIZE, 0, 5) ++ FIELD(EVENT_QUE_BASE0, EVENT_QUE_ADDR0, 6, 26) ++ ++REG32(EVENT_QUE_BASE1, 0x1104) ++ FIELD(EVENT_QUE_BASE1, EVENT_QUE_ADDR1, 0, 20) ++ FIELD(EVENT_QUE_BASE1, EVENT_QUE_WA_CFG, 31, 1) ++ ++#define UMMU_EVENTQ_PI_WMASK (GENMASK(19, 0) | GENMASK(31, 31)) ++REG32(EVENT_QUE_PI, 0x1108) ++ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_IDX, 0, 19) ++ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_WRAP, 19, 1) ++ FIELD(EVENT_QUE_PI, EVENT_QUE_OVFLG, 31, 1) ++ ++#define UMMU_EVENTQ_CI_WMASK (GENMASK(19, 0) | GENMASK(31, 31)) ++REG32(EVENT_QUE_CI, 0x110C) ++ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_IDX, 0, 19) ++ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_WRAP, 19, 1) ++ FIELD(EVENT_QUE_CI, EVENT_QUE_OVFLG_RESP, 31, 1) ++ ++#define UMMU_EVENTQ_USI_ADDR_WMASK GENMASK_ULL(51, 2) ++REG32(EVENT_QUE_USI_ADDR0, 0x1110) ++ FIELD(EVENT_QUE_USI_ADDR0, USI_ADDR0, 2, 30) ++ ++REG32(EVENT_QUE_USI_ADDR1, 0x1114) ++ FIELD(EVENT_QUE_USI_ADDR1, USI_ADDR1, 0, 20) ++ ++#define UMMU_EVENT_QUE_USI_DATA_WMASK GENMASK(31, 0) ++REG32(EVENT_QUE_USI_DATA, 0x1118) ++ FIELD(EVENT_QUE_USI_DATA, USI_DATA, 0, 32) ++ ++#define UMMU_EVENTQ_USI_ATTR_WMASK GENMASK(5, 0) ++REG32(EVENT_QUE_USI_ATTR, 0x111C) ++ FIELD(EVENT_QUE_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4) ++ FIELD(EVENT_QUE_USI_ATTR, USI_SH_CFG, 4, 2) ++ ++REG32(GLB_INT_EN, 0x1130) ++ FIELD(GLB_INT_EN, GLB_ERR_INT_EN, 0, 1) ++ FIELD(GLB_INT_EN, EVENT_QUE_INT_EN, 1, 1) ++ ++REG32(GLB_ERR, 0x1134) ++ FIELD(GLB_ERR, MCMD_QUE_ERR, 0, 1) ++ FIELD(GLB_ERR, EVENT_QUE_ABT_ERR, 1, 1) ++ FIELD(GLB_ERR, USI_MCMD_QUE_ABT_ERR, 2, 1) ++ FIELD(GLB_ERR, USI_EVENT_QUE_ABT_ERR, 3, 1) ++ FIELD(GLB_ERR, USI_UIEQ_QUE_ABT_ERR, 4, 1) ++ FIELD(GLB_ERR, USI_GLB_ERR_ABT_ERR, 7, 1) ++ ++#define UMMU_GLB_ERR_RESP_WMASK GENMASK(4, 0) | GENMASK(7, 7) ++REG32(GLB_ERR_RESP, 0x1138) ++ FIELD(GLB_ERR_RESP, MCMDQ_QUE_ERR, 0, 1) ++ FIELD(GLB_ERR_RESP, EVENT_QUE_ABT_ERR, 1, 1) ++ FIELD(GLB_ERR_RESP, USI_MCMDQ_QUE_ABT_ERR, 2, 1) ++ FIELD(GLB_ERR_RESP, USI_EVENT_QUE_ABT_ERR, 3, 1) ++ FIELD(GLB_ERR_RESP, USI_UIEQ_QUE_ABT_ERR, 4, 1) ++ FIELD(GLB_ERR_RESP, USI_GLB_ERR_ABT_ERR, 7, 1) ++ ++#define UMMU_GLB_ERR_INT_USI_ADDR_WMASK GENMASK_ULL(51, 2) ++REG32(GLB_ERR_INT_USI_ADDR0, 0x1140) ++ FIELD(GLB_ERR_INT_USI_ADDR0, USI_ADDR0, 2, 29) ++ ++REG32(GLB_ERR_INT_USI_ADDR1, 0x1144) ++ FIELD(GLB_ERR_INT_USI_ADDR1, USI_ADDR1, 0, 19) ++ ++#define UMMU_GLB_ERR_INT_USI_DATA_WMASK GENMASK(31, 0) ++REG32(GLB_ERR_INT_USI_DATA, 0x1148) ++ FIELD(GLB_ERR_INT_USI_DATA, USI_DATA, 0, 32) ++ ++#define UMMU_GLB_ERR_INT_USI_ATTR_WMASK GENMASK(5, 0) ++REG32(GLB_ERR_INT_USI_ATTR, 0x114C) ++ 
FIELD(GLB_ERR_INT_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4) ++ FIELD(GLB_ERR_INT_USI_ATTR, USI_SH_CFG, 4, 2) ++ ++#define MAPT_CMDQ_CTXT_BADDR_WMASK (((GENMASK_ULL(31, 31) | GENMASK_ULL(19, 0)) << 32) | \ ++ (GENMASK_ULL(4, 0) | GENMASK_ULL(31, 6))) ++REG32(MAPT_CMDQ_CTXT_BADDR0, 0x1160) ++ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_LOG2SIZE, 0, 5) ++ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_ADDR0, 6, 26) ++ ++REG32(MAPT_CMDQ_CTXT_BADDR1, 0x1164) ++ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_ADDR1, 0, 20) ++ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_RA_CFG, 31, 1) ++ ++#define RELEASE_UM_QUEUE_WMASK 0x1 ++REG32(RELEASE_UM_QUEUE, 0x1178) ++ FIELD(RELEASE_UM_QUEUE, MAPT_RLSE_UM_CMDQ, 0, 1) ++ ++#define RELEASE_UM_QUEUE_ID_WMASK GENMASK(30, 0) ++REG32(RELEASE_UM_QUEUE_ID, 0x117C) ++ FIELD(RELEASE_UM_QUEUE_ID, MAPT_RLSE_UM_CMDQ_ID, 0, 31) ++ ++#define A_UCMDQ_PI_START_REG 0x20000 ++/* MAPT Commd queue control page 4k: 0x2000C + 2^16 * 0x1000 ++ * MAPT Commd queue control page 64k: 0x2000C + 2^12 * 0x10000 */ ++#define A_UCPLQ_CI_END_REG 0x1002000C ++ ++/* ummu user register define */ ++REG32(UMMU_INT_MASK, 0x3404) ++ FIELD(UMMU_INT_MASK, UIEQ_USI_MASK, 0, 1) ++ FIELD(UMMU_INT_MASK, UBIF_USI_MASK, 1, 1) ++ ++REG32(DSTEID_KV_TABLE_BASE0, 0x3800) ++ FIELD(DSTEID_KV_TABLE_BASE0, DSTEID_TV_TABLE_BASE_ADDR0, 5, 27) ++ ++REG32(DSTEID_KV_TABLE_BASE1, 0x3804) ++ FIELD(DSTEID_KV_TABLE_BASE1, DSTEID_TV_TABLE_BASE_ADDR1, 0, 20) ++ ++REG32(DSTEID_KV_TABLE_BASE_CFG, 0x3808) ++ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_MEMATTR, 0, 4) ++ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_SH, 4, 2) ++ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_BANK_NUM, 8, 8) ++ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_DEPTH, 16, 16) ++ ++REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG0, 0x380C) ++ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_SEL, 0, 4) ++ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_WIDTH, 4, 4) ++ ++REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG1, 0x3810) ++ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG1, DSTEID_KV_TABLE_HASH_CRC32_SEED, 0, 32) ++ ++REG32(UMMU_DSTEID_CAM_TABLE_BASE0, 0x3820) ++ FIELD(UMMU_DSTEID_CAM_TABLE_BASE0, DSTEID_CAM_TABLE_BASE_ADDR0, 5, 27) ++ ++REG32(UMMU_DSTEID_CAM_TABLE_BASE1, 0x3824) ++ FIELD(UMMU_DSTEID_CAM_TABLE_BASE1, DSTEID_CAM_TABLE_BASE_ADDR1, 0, 20) ++ ++REG32(UMMU_DSTEID_CAM_TABLE_BASE_CFG, 0x3828) ++ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_MEMATTR, 0, 4) ++ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_SH, 4, 2) ++ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_DEPTH, 16, 32) ++ ++#define MAPT_CMDQ_CTRLR_PAGE_SIZE_4K 1 ++#define MAPT_CMDQ_CTRLR_PAGE_SIZE_64K 0 ++#define UMCMD_PAGE_SEL_WMASK 0x1 ++REG32(UMCMD_PAGE_SEL, 0x3834) ++ FIELD(UMCMD_PAGE_SEL, PAGE_MODEL_SEL_EN, 0, 1) ++ ++ ++/* ummu user logic register define */ ++REG32(UMMU_USER_CONFIG0, 0x4C00) ++ ++REG32(UMMU_USER_CONFIG1, 0x4C04) ++ ++REG32(UMMU_USER_CONFIG2, 0x4C08) ++ FIELD(UMMU_USER_CONFIG2, INV_TLB_ALL_NS, 0, 1) ++ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INIT_EN, 1, 1) ++ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INITING, 2, 1) ++ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INIT_EN, 3, 1) ++ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INITING, 4, 1) ++ ++REG32(UMMU_USER_CONFIG3, 0x4C0C) ++ ++REG32(UMMU_USER_CONFIG4, 0x4C10) ++ ++REG32(UMMU_USER_CONFIG5, 0x4C14) ++ ++REG32(UMMU_USER_CONFIG6, 0x4C18) ++ ++REG32(UMMU_USER_CONFIG7, 0x4C1C) ++ ++REG32(UMMU_USER_CONFIG8, 0x4C20) ++ ++REG32(UMMU_USER_CONFIG9, 0x4C24) ++ ++REG32(UMMU_USER_CONFIG10, 0x4C28) ++ ++REG32(UMMU_USER_CONFIG11, 0x4C2C) ++ 
++REG32(UMMU_MEM_USI_ADDR0, 0x4D90) ++ FIELD(UMMU_MEM_USI_ADDR0, UBIF_MEM_USI_ADDR0, 2, 30) ++ ++REG32(UMMU_MEM_USI_ADDR1, 0x4D94) ++ FIELD(UMMU_MEM_USI_ADDR1, UBIF_MEM_USI_ADDR1, 0, 20) ++ ++REG32(UMMU_MEM_USI_DATA, 0x4D98) ++ FIELD(UMMU_MEM_USI_DATA, UBIF_MEM_USI_DATA, 0, 32) ++ ++REG32(UMMU_MEM_USI_ATTR, 0x4D9C) ++ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_MEM_ATTR_CFG, 0, 4) ++ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_SH_CFG, 4, 2) ++ ++#define TYPE_UMMU_IOMMU_MEMORY_REGION "ummu-iommu-memory-region" ++ ++#define CMD_TYPE(x) extract32((x)->word[0], 0, 8) ++#define CMD_SYNC_CM(x) extract32((x)->word[0], 12, 2) ++#define CMD_SYNC_CM_NONE 0x0 ++#define CMD_SYNC_CM_USI 0x1 ++#define CMD_SYNC_CM_SEV 0x2 ++#define CMD_SYNC_USI_SH(x) extract32((x)->word[0], 14, 2) ++#define CMD_SYNC_USI_ATTR(x) extract32((x)->word[0], 16, 4) ++#define CMD_SYNC_USI_DATA(x) extract32((x)->word[1], 0, 32) ++#define CMD_SYNC_USI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 2)) ++#define CMD_CREATE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32) ++#define CMD_CREATE_KVTBL_BASE_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6)) ++#define CMD_CREATE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16) ++#define CMD_DELETE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32) ++#define CMD_DELETE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16) ++#define CMD_TECTE_TAG(x) extract32((x)->word[4], 0, 16) ++#define CMD_TECTE_RANGE(x) extract32((x)->word[1], 20, 5) ++/* according to UB SPEC, if range val is 31, invalid all tecte */ ++#define CMD_TECTE_RANGE_INVILID_ALL(x) ((x) == 31) ++#define CMD_NULL_SUBOP_CHECK_PA_CONTINUITY 1 ++#define CMD_NULL_SUBOP(x) extract32((x)->word[0], 8, 8) ++#define CMD_NULL_CHECK_PA_CONTI_SIZE(x) (1 << extract32((x)->word[0], 24, 6)) ++#define CMD_NULL_CHECK_PA_CONTI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(47, 12)) ++#define UMMU_RUN_IN_VM_FLAG 0x10 ++#define PA_CONTINUITY 0x00 ++#define PA_NOT_CONTINUITY 0x01 ++ ++#define MCMDQ_BASE_ADDR_MASK ~0xf0UL ++#define MCMDQ_IDX_MASK 0xf0 ++#define MCMDQ_PROD_WMASK 0x808fffff ++#define MCMDQ_CONS_WMASK 0x878fffff ++#define MCMDQ_PROD_BASE_ADDR 0x108 ++#define MCMDQ_CONS_BASE_ADDR 0x10C ++#define MCMD_QUE_LOG2SIZE(x) extract32(x, 0, 5) ++#define MCMD_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 5)) ++#define MCMD_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1)) ++#define MCMD_QUE_WD_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1)) ++#define MCMD_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1) ++#define MCMD_QUE_WD_WRAP(que) extract32((que)->prod, (que)->log2size, 1) ++#define MCMD_QUE_EN_BIT(que) extract32((que)->prod, 31, 1) ++#define MCMD_QUE_EN_RESP_BIT 31 ++ ++#define EVENT_QUE_LOG2SIZE(x) extract32(x, 0, 5) ++#define EVENT_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 6)) ++#define EVENT_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1)) ++#define EVENT_QUE_WR_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1)) ++#define EVENT_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1) ++#define EVENT_QUE_WR_WRAP(que) extract32((que)->prod, (que)->log2size, 1) ++ ++#define TECT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6)) ++#define TECT_L2TECTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 6)) ++#define TECT_DESC_V(x) extract32((x)->word[0], 0, 1) ++#define TECTE_TCT_PTR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6)) ++#define TECTE_TCT_NUM(x) extract32((x)->word[2], 0, 5) ++#define 
TECTE_TCT_FMT(x) extract32((x)->word[3], 20, 2) ++#define TECTE_VALID(x) extract32((x)->word[0], 0, 1) ++#define TECTE_ST_MODE(x) extract32((x)->word[0], 1, 3) ++#define TECTE_ST_MODE_ABORT 0 ++#define TECTE_ST_MODE_BYPASS 4 ++#define TECTE_ST_MODE_S1 5 ++#define TECTE_ST_MODE_S2 6 ++#define TECTE_ST_MODE_NESTED 7 ++ ++#define TCT_FMT_LINEAR 0 ++#define TCT_FMT_LVL2_4K 1 ++#define TCT_FMT_LVL2_64K 2 ++#define TCT_SPLIT_64K 10 ++#define TCT_L2_ENTRIES (1UL << TCT_SPLIT_64K) ++#define TCT_L1TCTE_V(x) extract32((x)->word[0], 0, 1) ++#define TCT_L2TCTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 12)) ++#define TCTE_TTBA(x) ((*(uint64_t *)&(x)->word[4]) & GENMASK_ULL(51, 4)) ++#define TCTE_TCT_V(x) extract32((x)->word[0], 0, 1) ++#define TCTE_SZ(x) extract32((x)->word[2], 0, 6) ++#define TCTE_TGS(x) extract32((x)->word[2], 6, 2) ++/* according ub spec Chapter 9, tct max num is 2 ^ tct_num */ ++#define TCTE_MAX_NUM(x) (1 << (x)) ++ ++#define MAPT_CMDQ_CTXT_BASE_BYTES 64 ++#define MAPT_CMDQ_CTXT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6)) ++#define UCMDQ_UCPLQ_CI_PI_MASK 0xFULL ++#define UCMDQ_PI 0x00 ++#define UCMDQ_CI 0x04 ++#define UCPLQ_PI 0x08 ++#define UCPLQ_CI 0x0C ++#define MAPT_4K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x1000) ++#define MAPT_64K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x10000) ++#define MAPT_UCMDQ_LOG2SIZE(base) extract32((base)->word[0], 2, 4) ++#define MAPT_UCMDQ_PI(base) (extract32((base)->word[10], 0, 16) & \ ++ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1)) ++#define MAPT_UCMDQ_PI_WRAP(base) extract32((base)->word[10], MAPT_UCMDQ_LOG2SIZE(base), 1) ++#define MAPT_UCMDQ_CI(base) (extract32((base)->word[10], 16, 16) & \ ++ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1)) ++#define MAPT_UCMDQ_CI_WRAP(base) extract32((base)->word[10], 16 + MAPT_UCMDQ_LOG2SIZE(base), 1) ++#define MAPT_UCMDQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[0]) & GENMASK_ULL(51, 12)) ++ ++#define MAPT_UCMD_TYPE_PSYNC 0x01 ++#define MAPT_UCMD_TYPE_PLBI_USR_ALL 0x10 ++#define MAPT_UCMD_TYPE_PLBI_USR_VA 0x11 ++#define MAPT_UCMD_TYPE(cmd) ((cmd)->word[0] & GENMASK(7, 0)) ++ ++#define MAPT_UCPLQ_LOG2SIZE(base) extract32((base)->word[2], 2, 4) ++#define MAPT_UCPLQ_PI(base) (extract32((base)->word[11], 0, 16) & \ ++ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1)) ++#define MAPT_UCPLQ_PI_WRAP(base) extract32((base)->word[11], MAPT_UCPLQ_LOG2SIZE(base), 1) ++#define MAPT_UCPLQ_CI(base) (extract32((base)->word[11], 16, 16) & \ ++ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1)) ++#define MAPT_UCPLQ_CI_WRAP(base) extract32((base)->word[11], 16 + MAPT_UCPLQ_LOG2SIZE(base), 1) ++#define MAPT_UCPLQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[2]) & GENMASK_ULL(51, 12)) ++#define MAPT_UCPL_STATUS_INVALID 0x0 ++#define MAPT_UCPL_STATUS_PSYNC_SUCCESS 0x1 ++#define MAPT_UCPL_STATUS_TYPE_ERROR 0x2 ++#define MAPT_UCPL_STATUS_PROCESS_ERROR 0x3 ++ ++typedef struct UMMUMcmdqCmd { ++ uint32_t word[8]; ++} UMMUMcmdqCmd; ++ ++typedef struct UMMUEvent { ++ uint32_t word[16]; ++} UMMUEvent; ++ ++typedef enum UmmuMcmdqCmdType { ++ CMD_SYNC = 0x1, ++ CMD_STALL_RESUME = 0x02, ++ CMD_PREFET_CFG = 0x04, ++ CMD_CFGI_TECT = 0x08, ++ CMD_CFGI_TECT_RANGE = 0x09, ++ CMD_CFGI_TCT = 0x0A, ++ CMD_CFGI_TCT_ALL = 0x0B, ++ CMD_CFGI_VMS_PIDM = 0x0C, ++ CMD_PLBI_OS_EID = 0x14, ++ CMD_PLBI_OS_EIDTID = 0x15, ++ CMD_PLBI_OS_VA = 0x16, ++ CMD_TLBI_OS_ALL = 0x10, ++ CMD_TLBI_OS_TID = 0x11, ++ CMD_TLBI_OS_VA = 0x12, ++ CMD_TLBI_OS_VAA = 0x13, ++ CMD_TLBI_HYP_ALL = 0x18, ++ CMD_TLBI_HYP_TID = 0x19, ++ 
CMD_TLBI_HYP_VA = 0x1A, ++ CMD_TLBI_HYP_VAA = 0x1B, ++ CMD_TLBI_S1S2_VMALL = 0x28, ++ CMD_TLBI_S2_IPA = 0x2a, ++ CMD_TLBI_NS_OS_ALL = 0x2C, ++ CMD_RESUME = 0x44, ++ CMD_CREATE_KVTBL = 0x60, ++ CMD_DELETE_KVTBL = 0x61, ++ CMD_NULL = 0x62, ++ CMD_TLBI_OS_ALL_U = 0x90, ++ CMD_TLBI_OS_ASID_U = 0x91, ++ CMD_TLBI_OS_VA_U = 0x92, ++ CMD_TLBI_OS_VAA_U = 0x93, ++ CMD_TLBI_HYP_ASID_U = 0x99, ++ CMD_TLBI_HYP_VA_U = 0x9A, ++ CMD_TLBI_S1S2_VMALL_U = 0xA8, ++ CMD_TLBI_S2_IPA_U = 0xAa, ++ MCMDQ_CMD_MAX, ++} UmmuMcmdqCmdType; ++ ++typedef struct UMMUS2Hwpt { ++ IOMMUFDBackend *iommufd; ++ uint32_t hwpt_id; ++ uint32_t ioas_id; ++} UMMUS2Hwpt; ++ ++typedef struct UMMUViommu { ++ UMMUState *ummu; ++ IOMMUFDBackend *iommufd; ++ IOMMUFDViommu *core; ++ UMMUS2Hwpt *s2_hwpt; ++ QLIST_HEAD(, UMMUDevice) device_list; ++ QLIST_ENTRY(UMMUViommu) next; ++} UMMUViommu; ++ ++typedef struct UMMUS1Hwpt { ++ void *ummu; ++ IOMMUFDBackend *iommufd; ++ UMMUViommu *viommu; ++ uint32_t hwpt_id; ++ QLIST_HEAD(, UMMUDevice) device_list; ++ QLIST_ENTRY(UMMUViommu) next; ++} UMMUS1Hwpt; ++ ++typedef struct UMMUVdev { ++ UMMUViommu *vummu; ++ IOMMUFDVdev *core; ++ uint32_t sid; ++} UMMUVdev; ++ ++typedef struct UMMUDevice { ++ UMMUState *ummu; ++ IOMMUMemoryRegion iommu; ++ AddressSpace as; ++ AddressSpace as_sysmem; ++ HostIOMMUDeviceIOMMUFD *idev; ++ UMMUViommu *viommu; ++ UMMUS1Hwpt *s1_hwpt; ++ UBDevice *udev; ++ UMMUVdev *vdev; ++ QLIST_ENTRY(UMMUDevice) next; ++} UMMUDevice; ++ ++typedef struct UMMUTransCfg { ++ dma_addr_t tct_ptr; ++ uint64_t tct_num; ++ uint64_t tct_fmt; ++ dma_addr_t tct_ttba; ++ uint32_t tct_sz; ++ uint32_t tct_tgs; ++ uint32_t tecte_tag; ++ uint32_t tid; ++} UMMUTransCfg; ++ ++typedef enum UMMUEventType { ++ EVT_NONE = 0, ++ /* unsupport translation type */ ++ EVT_UT, ++ /* dstEid overflow */ ++ EVT_BAD_DSTEID, ++ /* abort when visit tect, or addr overflow */ ++ EVT_TECT_FETCH, ++ /* TECT not valid, (V=0) */ ++ EVT_BAD_TECT, ++ /* tect ent lack tokenid */ ++ EVT_RESERVE_0 = 5, ++ /* reserved, no content */ ++ EVT_BAD_TOKENID, ++ /* 1. TECT.TCT_MAXNUM = 0, tokenid disable, ++ * 2. TECT.ST_MODE[0] = 0, stage 1 translation close. ++ * 3. tokenid > TECT.TCT_MAXNUM ++ * 4. lvl1 tct invalid in two-level tct ++ */ ++ EVT_TCT_FETCH, ++ /* invalid tct */ ++ EVT_BAD_TCT, ++ /* error when Address Table walk */ ++ EVT_A_PTW_EABT, ++ /* translation input bigger than max valid value, ++ * or no valid translation table descriptor ++ */ ++ EVT_A_TRANSLATION = 10, ++ /* address translation out put bigger than max valid value */ ++ EVT_A_ADDR_SIZE, ++ /* Access flag fault because of AF=0 */ ++ EVT_ACCESS, ++ /* address translation permission error */ ++ EVT_A_PERMISSION, ++ /* TLB or PLB conflicted in translation */ ++ EVT_TBU_CONFLICT, ++ /* config cache conflicted in translation */ ++ EVT_CFG_CONFLICT = 15, ++ /* error occured when getting VMS */ ++ EVT_VMS_FETCH, ++ /* error when Permission Table walk */ ++ EVT_P_PTW_EABT, ++ /* abnormal software configuration in PTW */ ++ EVT_P_CFG_ERROR, ++ /* permission exception in PTW process */ ++ EVT_P_PERMISSION, ++ /* E-Bit verification failed */ ++ EVT_RESERVE_1 = 20, ++ /* reserved, no content */ ++ EVT_EBIT_DENY, ++ /* the UMMU hardware reports the execution result ++ * of the CMD_CREAT_DSTEID_TECT_RELATION command ++ * to the software. ++ */ ++ EVT_CREATE_DSTEID_TECT_RELATION_RESULT = 60, ++ /* the UMMU hardware reports the execution result ++ * of the CMD_DELETE_DSTEID_TECT_RELATION command ++ * to the software. 
++ */ ++ EVT_DELETE_DSTEID_TECT_RELATION_RESULT, ++ EVT_MAX ++} UMMUEventType; ++ ++typedef struct UMMUEventInfo { ++ UMMUEventType type; ++ uint32_t tecte_tag; ++ uint32_t tid; ++ union { ++ struct { ++ bool stall; ++ } f_translation; ++ } u; ++ /* TODO */ ++} UMMUEventInfo; ++ ++typedef enum { ++ UMMU_PTW_ERR_NONE, ++ UMMU_PTW_ERR_TRANSLATION, ++ UMMU_PTW_ERR_PERMISSION ++} UMMUPTWEventType; ++ ++typedef struct UMMUPTWEventInfo { ++ UMMUPTWEventType type; ++} UMMUPTWEventInfo; ++ ++#define EVT_SET_TYPE(x, v) ((x)->word[0] = deposit32((x)->word[0], 0, 8, v)) ++#define EVT_SET_TECTE_TAG(x, v) ((x)->word[8] = deposit32((x)->word[8], 0, 16, v)) ++#define EVT_SET_TID(x, v) ((x)->word[1] = deposit32((x)->word[1], 0, 20, v)) ++ ++/* TECTE Level 1 Description */ ++typedef struct TECTEDesc { ++ uint32_t word[2]; ++} TECTEDesc; ++ ++/* TCTE Level1 Description */ ++typedef struct TCTEDesc { ++ uint32_t word[2]; ++} TCTEDesc; ++ ++/* Target Entity Config Table Entry (TECTE) */ ++typedef struct TECTE { ++ uint32_t word[16]; ++} TECTE; ++ ++/* Target Contex Table Entry (TCTE) */ ++typedef struct TCTE { ++ uint32_t word[16]; ++} TCTE; ++ ++typedef struct MAPTCmdqBase { ++ uint32_t word[16]; ++} MAPTCmdqBase; ++ ++typedef struct MAPTCmd { ++ uint32_t word[4]; ++} MAPTCmd; ++ ++typedef struct MAPTCmdCpl { ++ uint32_t cpl_status : 4; ++ uint32_t rsv : 12; ++ uint32_t cmdq_ci : 16; ++} MAPTCmdCpl; ++ ++typedef struct UMMUTecteRange { ++ bool invalid_all; ++ uint32_t start; ++ uint32_t end; ++} UMMUTecteRange; ++ ++static inline void update_reg32_by_wmask(uint32_t *old, uint32_t new, uint32_t wmask) ++{ ++ *old = (*old & ~wmask) | (new & wmask); ++} ++ ++static inline void update_reg64_by_wmask(uint64_t *old, uint64_t new, uint64_t wmask) ++{ ++ *old = (*old & ~wmask) | (new & wmask); ++} ++ ++static inline bool ummu_mcmdq_enabled(UMMUMcmdQueue *mcmdq) ++{ ++ return MCMD_QUE_EN_BIT(&mcmdq->queue); ++} ++ ++static inline void ummu_mcmdq_enable_resp(UMMUMcmdQueue *mcmdq) ++{ ++ mcmdq->queue.cons |= GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT); ++} ++ ++static inline void ummu_mcmdq_disable_resp(UMMUMcmdQueue *mcmdq) ++{ ++ mcmdq->queue.cons &= ~(GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT)); ++} ++ ++static inline bool ummu_mcmdq_empty(UMMUMcmdQueue *mcmdq) ++{ ++ UMMUQueue *q = &mcmdq->queue; ++ ++ return MCMD_QUE_WD_IDX(q) == MCMD_QUE_RD_IDX(q) && ++ MCMD_QUE_WD_WRAP(q) == MCMD_QUE_RD_WRAP(q); ++} ++ ++static inline void ummu_mcmdq_cons_incr(UMMUMcmdQueue *mcmdq) ++{ ++ mcmdq->queue.cons = ++ deposit32(mcmdq->queue.cons, 0, mcmdq->queue.log2size + 1, mcmdq->queue.cons + 1); ++} ++ ++static inline void ummu_set_event_que_int_en(UMMUState *u, uint64_t data) ++{ ++ u->eventq.event_que_int_en = FIELD_EX32(data, GLB_INT_EN, EVENT_QUE_INT_EN); ++} ++ ++static inline void ummu_set_glb_err_int_en(UMMUState *u, uint64_t data) ++{ ++ u->glb_err.glb_err_int_en = FIELD_EX32(data, GLB_INT_EN, GLB_ERR_INT_EN); ++} ++ ++static inline bool ummu_event_que_int_en(UMMUState *u) ++{ ++ return u->eventq.event_que_int_en; ++} ++ ++static inline bool ummu_glb_err_int_en(UMMUState *u) ++{ ++ return u->glb_err.glb_err_int_en; ++} ++ ++static inline USIMessage ummu_get_eventq_usi_message(UMMUState *u) ++{ ++ USIMessage msg; ++ ++ msg.address = u->eventq.usi_addr; ++ msg.data = u->eventq.usi_data; ++ ++ return msg; ++} ++ ++static inline USIMessage ummu_get_gerror_usi_message(UMMUState *u) ++{ ++ USIMessage msg; ++ ++ msg.address = u->glb_err.usi_addr; ++ msg.data = u->glb_err.usi_data; ++ ++ return msg; ++} ++ 
++#define UMMU_TECT_MODE_SPARSE_TABLE 0x1 ++static inline uint32_t ummu_tect_mode_sparse_table(UMMUState *u) ++{ ++ return FIELD_EX32(u->ctrl[1], CTRL1, TECT_MODE_SEL) & UMMU_TECT_MODE_SPARSE_TABLE; ++} ++ ++#define UMMU_FEAT_2_LVL_TECT 0x1 ++static inline uint32_t ummu_tect_fmt_2level(UMMUState *u) ++{ ++ return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_FMT) & UMMU_FEAT_2_LVL_TECT; ++} ++ ++static inline uint32_t ummu_tect_split(UMMUState *u) ++{ ++ return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_SPLIT); ++} ++ ++static inline int tgs2granule(int bits) ++{ ++ switch (bits) { ++ case 0: ++ /* Translation Granule Size: 2 ^ 12 = 4K */ ++ return 12; ++ case 1: ++ /* Translation Granule Size: 2 ^ 16 = 64K */ ++ return 16; ++ case 2: ++ /* Translation Granule Size: 2 ^ 14 = 16K */ ++ return 14; ++ default: ++ return 0; ++ } ++} ++ ++static inline bool ummu_eventq_enabled(UMMUState *u) ++{ ++ return !!FIELD_EX32(u->ctrl[0], CTRL0, EVENTQ_EN); ++} ++ ++static inline bool ummu_eventq_full(UMMUEventQueue *eventq) ++{ ++ UMMUQueue *q = &eventq->queue; ++ ++ return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) && ++ EVENT_QUE_WR_WRAP(q) != EVENT_QUE_RD_WRAP(q); ++} ++ ++static inline bool ummu_eventq_empty(UMMUEventQueue *eventq) ++{ ++ UMMUQueue *q = &eventq->queue; ++ ++ return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) && ++ EVENT_QUE_WR_WRAP(q) == EVENT_QUE_RD_WRAP(q); ++} ++ ++static inline void ummu_eventq_prod_incr(UMMUEventQueue *eventq) ++{ ++ UMMUQueue *q = &eventq->queue; ++ ++ /* qlog2size + 1: add 1 which is consider for queue wrap bit. ++ * when cons == prod, the queue may full or empty, according warp bit ++ * to detemin full or emtpy. if cons.wrap == prod.wrap, the queue empty, ++ * if cons.wrap != prod.wrap, the queue full. ++ * */ ++ q->prod = deposit32(q->prod, 0, q->log2size + 1, q->prod + 1); ++} ++ ++/* ++ * MAPT Cmd Queue Base Struct ++ * ┌──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┐ ++ * │31│30│29│28│27│26│25│24│23│22│21│20│19│18│17│16│15│14│13│12│11│10│ 9│ 8│ 7│ 6│ 5│ 4│ 3│ 2│ 1│ 0│ ++ * 0 │ UCMD QUEUE BASE ADDRESS[31:12] │ │ ++ * 1 │ │ UCMD QUEUE BASE ADDRESS[51:32] │ ++ * 2 │ UCPL QUEUE BASE ADDRESS[31:12] │ │ ++ * 3 │ │ UCPL QUEUE BASE ADDRESS[51:32] │ ++ * 4 │ │ TECTE_TAG │ ++ * 5 │ │ ++ * 6 │ │ ++ * 7 │ │ ++ * 8 │ │ TokenID │ ++ * 9 │ │ ++ * 10 │ UCMQ_QUEUE CI │ UCMQ_QUEUE PI │ ++ * 11 │ UCPL_QUEUE CI │ UCPL_QUEUE PI │ ++ * 12 │ │ ++ * 13 │ │ ++ * 14 │ │ ++ * 15 │ │ ++ * └───────────────────────────────────────────────────────────────────────────────────────────────┘ ++ */ ++static inline void ummu_mapt_cmdq_base_update_ucmdq_pi(MAPTCmdqBase *base, uint16_t data) ++{ ++ base->word[10] = deposit32(base->word[10], 0, 16, data); ++} ++ ++static inline void ummu_mapt_cmdq_base_update_ucmdq_ci(MAPTCmdqBase *base, uint16_t data) ++{ ++ base->word[10] = deposit32(base->word[10], 16, 16, data); ++} ++ ++static inline void ummu_mapt_cmdq_base_update_ucplq_pi(MAPTCmdqBase *base, uint16_t data) ++{ ++ base->word[11] = deposit32(base->word[11], 0, 16, data); ++} ++ ++static inline void ummu_mapt_cmdq_base_update_ucplq_ci(MAPTCmdqBase *base, uint16_t data) ++{ ++ base->word[11] = deposit32(base->word[11], 16, 16, data); ++} ++ ++static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_pi(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[10], 0, 16); ++} ++ ++static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_ci(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[10], 16, 16); ++} ++ ++static inline uint16_t 
ummu_mapt_cmdq_base_get_ucplq_pi(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[11], 0, 16); ++} ++ ++static inline uint16_t ummu_mapt_cmdq_base_get_ucplq_ci(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[11], 16, 16); ++} ++ ++static inline uint16_t ummu_mapt_cmdq_base_get_tecte_tag(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[4], 0, 16); ++} ++ ++static inline uint32_t ummu_mapt_cmdq_base_get_token_id(MAPTCmdqBase *base) ++{ ++ return extract32(base->word[8], 0, 20); ++} ++ ++static inline bool ummu_mapt_ucmdq_empty(MAPTCmdqBase *base) ++{ ++ return MAPT_UCMDQ_PI(base) == MAPT_UCMDQ_CI(base) && ++ MAPT_UCMDQ_PI_WRAP(base) == MAPT_UCMDQ_CI_WRAP(base); ++} ++ ++static inline void ummu_mapt_ucmdq_cons_incr(MAPTCmdqBase *base) ++{ ++ base->word[10] = deposit32(base->word[10], 16, ++ MAPT_UCMDQ_LOG2SIZE(base) + 1, ++ ummu_mapt_cmdq_base_get_ucmdq_ci(base) + 1); ++} ++ ++static inline bool ummu_mapt_ucplq_full(MAPTCmdqBase *base) ++{ ++ return MAPT_UCPLQ_PI(base) == MAPT_UCPLQ_CI(base) && ++ MAPT_UCPLQ_PI_WRAP(base) != MAPT_UCPLQ_CI_WRAP(base); ++} ++ ++static inline void ummu_mapt_ucqlq_prod_incr(MAPTCmdqBase *base) ++{ ++ base->word[11] = deposit32(base->word[11], 0, ++ MAPT_UCPLQ_LOG2SIZE(base) + 1, ++ ummu_mapt_cmdq_base_get_ucplq_pi(base) + 1); ++} ++ ++static inline void ummu_mapt_ucplq_set_cpl(MAPTCmdCpl *cpl, uint16_t status, uint16_t ci) ++{ ++ cpl->cpl_status = status; ++ cpl->cmdq_ci = ci; ++} ++ ++static inline uint32_t ummu_mapt_cmdq_get_qid(UMMUState *u, uint64_t offset) ++{ ++ if (u->ucmdq_page_sel == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K) { ++ return MAPT_4K_CMDQ_CTXT_QID(offset); ++ } else { ++ return MAPT_64K_CMDQ_CTXT_QID(offset); ++ } ++} ++ ++static inline void ummu_mcmdq_construct_plbi_os_eidtid(UMMUMcmdqCmd *mcmd_cmd, uint32_t tid, uint16_t tag) ++{ ++ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_EIDTID); ++ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid); ++ mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag); ++} ++ ++static inline void ummu_plib_usr_va_to_pibi_os_va(MAPTCmd *mapt_cmd, UMMUMcmdqCmd *mcmd_cmd, ++ uint32_t tid, uint16_t tag) ++{ ++ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_VA); ++ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid); ++ mcmd_cmd->word[1] = deposit32(mcmd_cmd->word[1], 0, 6, extract32(mapt_cmd->word[1], 0, 6)); ++ mcmd_cmd->word[2] = mapt_cmd->word[2] & 0xFFFFF000; ++ mcmd_cmd->word[3] = mapt_cmd->word[3]; ++ mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag); ++} ++ ++#endif +diff --git a/include/hw/ub/hisi/ummu.h b/include/hw/ub/hisi/ummu.h +new file mode 100644 +index 0000000000..192f45e7e6 +--- /dev/null ++++ b/include/hw/ub/hisi/ummu.h +@@ -0,0 +1,22 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef HISI_UMMU_H ++#define HISI_UMMU_H ++#include "hw/ub/hisi/ubc.h" ++ ++#endif +diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h +index f8b65a0bbe..262b3d6ec2 100644 +--- a/include/hw/ub/ub_ummu.h ++++ b/include/hw/ub/ub_ummu.h +@@ -26,4 +26,97 @@ + + #define UMMU_INTERRUPT_ID 0x8989 // UMMU DEVICE ID need allocate later + ++#define __bf_shf(x) (__builtin_ffsll(x) - 1) ++ ++#define TYPE_UB_UMMU "ub-ummu" ++OBJECT_DECLARE_TYPE(UMMUState, UMMUBaseClass, UB_UMMU) ++ ++typedef struct UMMUQueue { ++ uint64_t base; /* base register */ ++ uint32_t prod; ++ uint32_t cons; ++ uint64_t entry_size; ++ uint8_t log2size; ++} UMMUQueue; ++ ++typedef struct UMMUMcmdQueue { ++ UMMUQueue queue; ++} UMMUMcmdQueue; ++ ++typedef struct UMMUEventQueue { ++ UMMUQueue queue; ++ uint64_t usi_addr; ++ uint32_t usi_data; ++ uint32_t usi_attr; ++ bool event_que_int_en; ++} UMMUEventQueue; ++ ++typedef struct UMMUGlbErr { ++ uint64_t usi_addr; ++ uint32_t usi_data; ++ uint32_t usi_attr; ++ bool glb_err_int_en; ++ uint32_t glb_err; ++ uint32_t glb_err_resp; ++} UMMUGlbErr; ++ ++typedef enum UMMUUSIVectorType { ++ UMMU_USI_VECTOR_EVETQ, ++ UMMU_USI_VECTOR_GERROR, ++ UMMU_USI_VECTOR_MAX, ++} UMMUUSIVectorType; ++ ++typedef struct UMMUKVTblEntry { ++ uint32_t dst_eid; ++ uint32_t tecte_tag; ++ QLIST_ENTRY(UMMUKVTblEntry) list; ++} UMMUKVTblEntry; ++ ++#define UMMU_MAX_MCMDQS 32 ++#define UMMU_TECTE_TAG_MAX_NUM 32 ++struct UMMUState { ++ /* */ ++ SysBusDevice dev; ++ const char *mrtypename; ++ MemoryRegion ummu_reg_mem; ++ uint64_t ummu_reg_size; ++ MemoryRegion root; ++ MemoryRegion stage2; ++ MemoryRegion sysmem; ++ ++ /* Nested */ ++ bool nested; ++ UMMUViommu *viommu; ++ ++ /* spec register define */ ++ uint32_t cap[7]; ++ uint32_t ctrl[4]; ++ uint32_t ctrl0_ack; ++ uint64_t tect_base; ++ uint32_t tect_base_cfg; ++ UMMUMcmdQueue mcmdqs[UMMU_MAX_MCMDQS]; ++ UMMUEventQueue eventq; ++ UMMUGlbErr glb_err; ++ uint64_t mapt_cmdq_ctxt_base; ++ uint32_t release_um_queue; ++ uint32_t release_um_queue_id; ++ uint32_t ucmdq_page_sel; ++ ++ int usi_virq[UMMU_USI_VECTOR_MAX]; ++ uint8_t bus_num; ++ QLIST_ENTRY(UMMUState) node; ++ uint32_t tecte_tag_cache[UMMU_TECTE_TAG_MAX_NUM]; ++ uint32_t tecte_tag_num; ++ ++ UBBus *primary_bus; ++ GHashTable *ummu_devs; ++ GHashTable *configs; ++ QLIST_HEAD(, UMMUKVTblEntry) kvtbl; ++}; ++ ++struct UMMUBaseClass { ++ /* */ ++ SysBusDeviceClass parent_class; ++}; ++ + #endif +diff --git a/include/hw/ub/ub_usi.h b/include/hw/ub/ub_usi.h +new file mode 100644 +index 0000000000..a9df04e686 +--- /dev/null ++++ b/include/hw/ub/ub_usi.h +@@ -0,0 +1,27 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#ifndef UB_USI_H ++#define UB_USI_H ++#include "qemu/typedefs.h" ++#include "hw/ub/ub.h" ++ ++struct USIMessage { ++ uint64_t address; ++ uint32_t data; ++}; ++ ++#endif +diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h +index a1b15dd219..f52ceea7a0 100644 +--- a/include/qemu/typedefs.h ++++ b/include/qemu/typedefs.h +@@ -140,7 +140,9 @@ typedef struct VMStateDescription VMStateDescription; + + /* UB typedef */ + typedef struct UBDevice UBDevice; ++typedef struct USIMessage USIMessage; + typedef struct UBBus UBBus; ++typedef struct UMMUViommu UMMUViommu; + + /* + * Pointer types +-- +2.33.0 + diff --git a/ub-and-some-hmp-cmd-for-query-ub-info.patch b/ub-and-some-hmp-cmd-for-query-ub-info.patch new file mode 100644 index 0000000000000000000000000000000000000000..3f6366de17807ec1cc07a7ed91dcd59447872e1b --- /dev/null +++ b/ub-and-some-hmp-cmd-for-query-ub-info.patch @@ -0,0 +1,828 @@ +From ed3f11d39c455b613fb25bc3f4401d25f6e3a857 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 21 Nov 2025 11:11:31 +0800 +Subject: [PATCH 6/7] ub: and some hmp cmd for query ub info + +add hmp cmd support query ub ers/config-space/device info + +Signed-off-by: caojinhuahw +--- + hmp-commands-info.hx | 48 ++++ + hw/ub/ub.c | 530 +++++++++++++++++++++++++++++++++++++++++- + include/hw/ub/ub.h | 5 + + include/monitor/hmp.h | 5 + + monitor/hmp-cmds.c | 157 +++++++++++++ + 5 files changed, 744 insertions(+), 1 deletion(-) + +diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx +index f5b37eb74a..c0d9796f45 100644 +--- a/hmp-commands-info.hx ++++ b/hmp-commands-info.hx +@@ -880,6 +880,54 @@ SRST + Show intel SGX information. + ERST + ++#if defined(CONFIG_UB) ++ { ++ .name = "ub", ++ .args_type = "id:s?", ++ .params = "[id]", ++ .help = "show UB info", ++ .cmd = hmp_info_ub, ++ }, ++#endif ++ ++SRST ++ ``info ub`` ++ Show UB information. ++ ++ERST ++ ++#if defined(CONFIG_UB) ++ { ++ .name = "ub-config", ++ .args_type = "id:s,offset:l,len:l", ++ .params = "id offset len", ++ .help = "show UB config space information", ++ .cmd = hmp_info_ub_config, ++ }, ++#endif ++ ++SRST ++ ``info ub-config`` *name* ++ Show UB config space information. ++ ++ERST ++ ++#if defined(CONFIG_UB) ++ { ++ .name = "ub-ers", ++ .args_type = "id:s,index:l,offset:l,len:l", ++ .params = "id index offset len", ++ .help = "show UB Function Entity Resource Space information", ++ .cmd = hmp_info_ub_ers, ++ }, ++#endif ++ ++SRST ++ ``info ub-fers`` *name* ++ Show UB Function Entity Resource Space information. 
++ ++ERST ++ + #if defined(CONFIG_MOS6522) + { + .name = "via", +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 6377005d9f..9eb5729f20 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -27,8 +27,11 @@ + #include "hw/ub/ub_config.h" + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_ummu.h" + #include "hw/ub/ub_usi.h" + #include "hw/ub/ub_acpi.h" ++#include "hw/vfio/ub.h" ++#include "ub_ummu_internal.h" + #include "qemu/log.h" + #include "qapi/error.h" + #include "hw/ub/ub_bus.h" +@@ -1317,4 +1320,529 @@ enum UbDeviceType ub_dev_get_type(UBDevice *udev) + default: + return UB_TYPE_UNINIT; + } +-} +\ No newline at end of file ++} ++ ++int ub_dev_dump_config(const char *id, uint64_t offset, uint64_t len, ++ char *buff, int buff_size) ++{ ++ UBDevice *dev = ub_find_device_by_id(id); ++ uint64_t emulated_offset; ++ uint64_t origin_len = len; ++ ++ if (!dev) { ++ qemu_log("UB device not found, id %s\n", id); ++ return -1; ++ } ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, false); ++ if (emulated_offset == UINT64_MAX) { ++ qemu_log("ub dev dump config out of emulated cfg range, " ++ "offset is 0x%lx\n", offset); ++ return -1; ++ } ++ ++ if (emulated_offset + len > ub_emulated_config_size()) { ++ len = ub_emulated_config_size() - emulated_offset; ++ qemu_log("ub dev dump config len out of eulated cfg range, " ++ "adjust len from 0x%lx to 0x%lx\n", origin_len, len); ++ } ++ ++ return ub_hexdump(dev->config, emulated_offset, len, buff, buff_size); ++} ++ ++void ub_dev_dump_ers(const char *id, uint8_t idx, uint64_t offset, uint64_t len, ++ char *buff, int buff_size) ++{ ++ UBDevice *udev = ub_find_device_by_id(id); ++ VFIOUBDevice *vdev = NULL; ++ VFIOERS *ers = NULL; ++ VFIORegion *region = NULL; ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ UbCfg1Basic *cfg1 = NULL; ++ int i; ++ int l = 0; ++ uint64_t len_printed = 0; ++ uint64_t len_remain = 0; ++ ++ if (!udev) { ++ qemu_log("do not have ub device %s\n", id); ++ return; ++ } ++ ++ cfg1 = (UbCfg1Basic *)(udev->config + emulated_offset); ++ l += snprintf(buff + l, buff_size - l, "io_region[%u] size 0x%lx addr 0x%lx\n", ++ idx, udev->io_regions[idx].size, udev->io_regions[idx].addr); ++ ++ vdev = VFIO_UB_SAFE(udev); ++ if (!vdev) { ++ l += snprintf(buff + l, buff_size - l, "only support vfio-ub dev\n"); ++ return; ++ } ++ ers = &vdev->ers[idx]; ++ region = &ers->region; ++ l += snprintf(buff + l, buff_size - l, "ers[%u] size 0x%zx gpa 0x%lx\n", ++ idx, ers->size, cfg1->ers_ubba[idx]); ++ l += snprintf(buff + l, buff_size - l, "ers[%u] region->nr_mmaps %u\n", ++ idx, region->nr_mmaps); ++ for (i = 0; i < region->nr_mmaps; i++) { ++ l += snprintf(buff + l, buff_size - l, ++ "mmaps[%d]:\n" ++ " +-- mmap %p size 0x%lx offset 0x%lx\n" ++ " +-- memRegion:\n" ++ " +--name %s addr 0x%lx align 0x%lx\n" ++ " +--bool: ram %u readonly %u\n", ++ i, region->mmaps[i].mmap, region->mmaps[i].size, ++ region->mmaps[i].offset, region->mmaps[i].mem.name, ++ region->mmaps[i].mem.addr, region->mmaps[i].mem.align, ++ region->mmaps[i].mem.ram, region->mmaps[i].mem.readonly); ++ if (region->mmaps[i].offset) { ++ if (offset < region->mmaps[i].offset) { ++ l += snprintf(buff + l, buff_size - l, ++ "warn:The query area falls within the simulation area,\n" ++ "querying the simulation area is not supported at present.\n" ++ "please adjust the offset to the queryable area.\n"); ++ return; ++ } else { ++ offset -= region->mmaps[i].offset; ++ } ++ } ++ len_remain = len - len_printed; ++ if 
(len_remain > region->mmaps[i].size) { ++ len_remain = region->mmaps[i].size; ++ } ++ ub_hexdump(region->mmaps[i].mmap, offset, len_remain, ++ buff + l, buff_size - l); ++ len_printed += region->mmaps[i].size; ++ } ++} ++ ++static void ub_dev_get_usi_info(Monitor *mon, UBDevice *udev) ++{ ++ int i; ++ /* usi enable info */ ++ monitor_printf(mon, "│%-24s│%-25u%-20u│\n", ++ "USI: enable masked", usi_enabled(udev), usi_ue_is_masked(udev)); ++ for (i = 0; i < udev->usi_entries_nr; i++) { ++ USIMessage msg = usi_get_message(udev, i); ++ monitor_printf(mon, "│%-4s%d%-19s│0x%-20lx%7u%8u%8u│\n", ++ "vect", i, ":addr data pend msk", ++ msg.address, msg.data, ++ usi_is_pending(udev, i), usi_is_masked(udev, i)); ++ } ++ /* USI table info */ ++ monitor_printf(mon, "│%-24s│0x%-23lx%-20u│\n", "USI: vector_table nr", ++ udev->usi_vec_table_mmio.addr, udev->usi_entries_nr); ++ monitor_printf(mon, "│%-24s│0x%-23lx%-20u│\n", "USI: addr_table nr", ++ udev->usi_addr_table_mmio.addr, udev->usi_addr_table_nr); ++ monitor_printf(mon, "│%-24s│0x%-43lx│\n", "USI: pend_table", ++ udev->usi_pend_table_mmio.addr); ++ ++ /* USI notify info */ ++ monitor_printf(mon, "│%-24s│%-45p│\n", "USI: UseNotify", ++ udev->usi_vector_use_notifier); ++ monitor_printf(mon, "│%-24s│%-45p│\n", "USI: ReleaseNotify", ++ udev->usi_vector_release_notifier); ++ monitor_printf(mon, "│%-24s│%-45p│\n", "USI: PollNotify", ++ udev->usi_vector_poll_notifier); ++ return; ++} ++ ++static void ub_dev_get_cfg0_info(Monitor *mon, UBDevice *udev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ UbCfg0Basic *cfg0 = (UbCfg0Basic *)(udev->config + offset); ++ ConfigNetAddrInfo *cna; ++ char cap_bitmap[CAP_BITMAP_LEN + 1] = {0}; ++ ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:total ports", cfg0->total_num_of_port); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:total UEs", cfg0->total_num_of_ue); ++ if (bitmap_scnprintf(cap_bitmap, sizeof(cap_bitmap), ++ (unsigned long *)cfg0->cap_bitmap, sizeof(cfg0->cap_bitmap)) <= 0) { ++ snprintf(cap_bitmap, sizeof(cap_bitmap), "failed to get bitmap"); ++ } ++ monitor_printf(mon, "│%-24s│0x%-43s│\n", "cfg0:cap_bitmap", cap_bitmap); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.entity", ++ cfg0->support_feature.bits.entity_available); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.mtu", ++ cfg0->support_feature.bits.mtu_supported); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.route_table", ++ cfg0->support_feature.bits.route_table_supported); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.upi", ++ cfg0->support_feature.bits.upi_supported); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.broker", ++ cfg0->support_feature.bits.broker_supported); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.switch", ++ cfg0->support_feature.bits.switch_supported); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "config0:feat.rsv", ++ cfg0->support_feature.bits.rsv); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:feat.cc", ++ cfg0->support_feature.bits.cc_supported); ++ monitor_printf(mon, "│%-24s│0x%-8x0x%-8x0x%-8x0x%-13x│\n", ++ "cfg0:eid", cfg0->eid.dw0, cfg0->eid.dw1, ++ cfg0->eid.dw2, cfg0->eid.dw3); ++ monitor_printf(mon, "│%-24s│0x%-8x0x%-8x0x%-8x0x%-13x│\n", ++ "cfg0:fm_eid", cfg0->fm_eid.dw0, cfg0->fm_eid.dw1, ++ cfg0->fm_eid.dw2, cfg0->fm_eid.dw3); ++ offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_NA_INFO_START, true); ++ cna = (ConfigNetAddrInfo *)(udev->config + offset); ++ monitor_printf(mon, "│%-24s│0x%-23x%-20u│\n", "cfg0:net_addr.cna", 
++ cna->primary_cna, cna->primary_cna); ++ monitor_printf(mon, "│%-24s│0x%-23x%-20u│\n", "cfg0:upi", cfg0->upi, cfg0->upi); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "config0:module_id", cfg0->module_id); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "config0:vendor_id", cfg0->vendor_id); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:dev_rst", cfg0->dev_rst); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:mtu_cfg", cfg0->mtu_cfg); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:cc_en", cfg0->cc_en); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg0:th_en", cfg0->th_en); ++ monitor_printf(mon, "│%-24s│0x%-23x%-20u│\n", "cfg0:fm_cna", ++ cfg0->fm_cna, cfg0->fm_cna); ++ monitor_printf(mon, "│%-24s│0x%-23lx%-20lu│\n", "cfg0:ueid_low", ++ cfg0->ueid_low, cfg0->ueid_low); ++ monitor_printf(mon, "│%-24s│0x%-23lx%-20lu│\n", "cfg0:ueid_high", ++ cfg0->ueid_high, cfg0->ueid_high); ++ return; ++} ++ ++static void ub_dev_get_cfg1_int_type2_capinfo(Monitor *mon, UBDevice *udev) ++{ ++ uint64_t emu_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true); ++ UbCfg1IntType2Cap *cap = (UbCfg1IntType2Cap *)(udev->config + emu_offset); ++ ++ monitor_printf(mon, "│%-24s│vec_table_start_addr 0x%-22lx│\n", "cfg1:int type2 CAP", ++ cap->vec_table_start_addr); ++ monitor_printf(mon, "│%-24s│add_table_start_addr 0x%-22lx│\n", "cfg1:int type2 CAP", ++ cap->add_table_start_addr); ++ monitor_printf(mon, "│%-24s│pend_table_start_addr 0x%-21lx│\n", "cfg1:int type2 CAP", ++ cap->pend_table_start_addr); ++ monitor_printf(mon, "│%-24s│int_id 0x%-6xint_mask 0x%-3xint_enable 0x%-3x│\n", ++ "cfg1:int type2 CAP", cap->interrupt_id, cap->interrupt_mask, cap->interrupt_enable); ++ return; ++} ++ ++static void ub_dev_get_cfg1_info(Monitor *mon, UBDevice *udev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ UbCfg1Basic *cfg1 = (UbCfg1Basic *)(udev->config + offset); ++ char cap_bitmap[CAP_BITMAP_LEN + 1] = {0}; ++ int i; ++ ++ if (bitmap_scnprintf(cap_bitmap, sizeof(cap_bitmap), ++ (unsigned long *)cfg1->cap_bitmap, sizeof(cfg1->cap_bitmap)) <= 0) { ++ snprintf(cap_bitmap, sizeof(cap_bitmap), "failed to get bitmap"); ++ } ++ monitor_printf(mon, "│%-24s│0x%-43s│\n", "cfg1:cap_bitmap", cap_bitmap); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:feat.mgs", ++ cfg1->support_feature.bits.mgs); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:feat.ubbas", ++ cfg1->support_feature.bits.ubbas); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:feat.ers0s", ++ cfg1->support_feature.bits.ers0s); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:feat.ers1s", ++ cfg1->support_feature.bits.ers1s); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:feat.ers2s", ++ cfg1->support_feature.bits.ers2s); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "config1:feat.cdmas", ++ cfg1->support_feature.bits.cdmas); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "config1:feat.matt_juris", ++ cfg1->support_feature.bits.matt_juris); ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ monitor_printf(mon, "│%-9s%-2usz sa ba(hex)│%-11x%-17lx%-17lx│\n", ++ "cfg1:ERS", i, cfg1->ers_space_size[i], ++ cfg1->ers_start_addr[i], cfg1->ers_ubba[i]); ++ } ++ monitor_printf(mon, "│%-24s│%-20u%-25u│\n", "cfg1:elr elr_done", ++ cfg1->elr, cfg1->elr_done); ++ monitor_printf(mon, "│%-24s│%-20u%-25u│\n", "cfg1:mig ctrl stat", ++ cfg1->mig_ctrl, cfg1->mig_status); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:tpid u num", ++ cfg1->tpid_num); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:ctp_tb_bypass", ++ cfg1->ctp_tb_bypass); ++ 
monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:crystal_dma_en", ++ cfg1->crystal_dma_en); ++ monitor_printf(mon, "│%-24s│0x%-21lx0x%-20x│\n", "cfg1:eid_upi tab ten", ++ cfg1->eid_upi_tab, cfg1->eid_upi_ten); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:bus_access_en", ++ cfg1->bus_access_en); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cfg1:dev_rs_access_en", ++ cfg1->dev_rs_access_en); ++ monitor_printf(mon, "│%-24s│0x%-23x%-20u│\n", "cfg1:dev_token_id", ++ cfg1->dev_token_id, cfg1->dev_token_id); ++ ++ ub_dev_get_cfg1_int_type2_capinfo(mon, udev); ++ ++ return; ++} ++static void ub_dev_get_bus_info(Monitor *mon, UBDevice *udev) ++{ ++ BusControllerState *ubcs = container_of_ubbus(UB_BUS(udev->qdev.parent_bus)); ++ UBDevice *tmp; ++ ++ monitor_printf(mon, "│%-24s│%-45s│\n", ++ "parent_bus name", udev->qdev.parent_bus->name); ++ monitor_printf(mon, "│%-24s│%-45d│\n", ++ "parent_bus max_index", udev->qdev.parent_bus->max_index); ++ monitor_printf(mon, "│%-24s│%-45u│\n", ++ "parent_bus realized", udev->qdev.parent_bus->realized); ++ monitor_printf(mon, "│%-24s│%-45u│\n", ++ "parent_bus full", udev->qdev.parent_bus->full); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "parent_bus num_children", ++ udev->qdev.parent_bus->num_children); ++ QLIST_FOREACH(tmp, &ubcs->bus->devices, node) { ++ monitor_printf(mon, "│%-24s│name %-10s id %-16seid%7u│\n", ++ " device", tmp->name, tmp->qdev.id, tmp->eid); ++ } ++ ++ monitor_printf(mon, "│%-24s│%-45s│\n", ++ "parent_bus p id", udev->qdev.parent_bus->parent->id); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "parent_bus p canon_path", ++ udev->qdev.parent_bus->parent->canonical_path); ++ return; ++} ++ ++static void ub_dev_get_ubc_info(Monitor *mon, UBDevice *udev) ++{ ++ BusControllerState *ubcs = container_of_ubbus(UB_BUS(udev->qdev.parent_bus)); ++ VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); ++ ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cluster_mode", vms->ub_cluster_mode); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "fm_deployment", vms->fm_deployment); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "mmio_size", ubcs->mmio_size); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "mig_enabled", ubcs->mig_enabled); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "msgq_reg_size", ubcs->msgq_reg_size); ++ monitor_printf(mon, "│%-24s│0x%-43lx│\n", "msgq_reg", (uint64_t)ubcs->msgq_reg); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "MR msgq_reg_mem name", ubcs->msgq_reg_mem.name); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "MR io_mmio name", ubcs->io_mmio.name); ++ monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", ++ "hi_msgq_info sq addr", ubcs->msgq.sq_base_addr_gpa, ++ ubcs->msgq.sq_base_addr_hva); ++ monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", ++ "hi_msgq_info cq addr", ubcs->msgq.cq_base_addr_gpa, ++ ubcs->msgq.cq_base_addr_hva); ++ monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", ++ "hi_msgq_info rq addr", ubcs->msgq.rq_base_addr_gpa, ++ ubcs->msgq.rq_base_addr_hva); ++ return; ++} ++ ++static void ub_dev_get_ummu_info(Monitor *mon, UBDevice *udev) ++{ ++ unsigned int bus_num; ++ UMMUState *ummu = NULL; ++ UMMUDevice *ummu_dev = NULL; ++ UMMUKVTblEntry *entry = NULL; ++ UMMUTransCfg *cfg = NULL; ++ int i; ++ ++ if (1 == sscanf(udev->qdev.parent_bus->name, "ubus.%u", &bus_num)) { ++ ummu = ummu_find_by_bus_num(bus_num); ++ } ++ ++ if (!ummu) { ++ return; ++ } ++ monitor_printf(mon, "│%-24s│%-45s│\n", "ummu id ", ummu->dev.parent_obj.id); ++ monitor_printf(mon, "│%-24s│0x%-43lx│\n", " ummu_reg_size ", ummu->ummu_reg_size); ++ for (i = 0; i 
< ARRAY_SIZE(ummu->mcmdqs); i++) { ++ monitor_printf(mon, "│%-21s%-3u│gpa 0x%-39lx│\n", ++ " que_info cmdq base", i, ummu->mcmdqs[i].queue.base); ++ } ++ monitor_printf(mon, "│%-22s%2u│gpa 0x%-39lx│\n", ++ " que_info eventq base", i, ummu->eventq.queue.base); ++ monitor_printf(mon, "│%-24s│%-8x %-8x %-8x %-8x %-8x │\n", ++ "ummu CAP[0-4]", ummu->cap[0], ummu->cap[1], ++ ummu->cap[2], ummu->cap[3], ummu->cap[4]); ++ monitor_printf(mon, "│%-24s│%-8x %-18x %-17x│\n", ++ "ummu CAP[5-6] ctrl0_ack", ++ ummu->cap[5], ummu->cap[6], ummu->ctrl0_ack); ++ monitor_printf(mon, "│%-24s│%-8x %-8x %-8x %-18x│\n", ++ "ummu CTRL[0-3]", ummu->ctrl[0], ummu->ctrl[1], ++ ummu->ctrl[2], ummu->ctrl[3]); ++ ++ monitor_printf(mon, "│%-24s│0x%-20lx 0x%-20lx│\n", ++ "tect_base_addr", ummu->tect_base, ++ (uint64_t)TECT_BASE_ADDR(ummu->tect_base)); ++ monitor_printf(mon, "│%-24s│0x%-23x%-20u│\n", "tect_base_cfg tag_num", ++ ummu->tect_base_cfg, ummu->tecte_tag_num); ++ for (i = 0; i < ummu->tecte_tag_num; i++) { ++ monitor_printf(mon, "│%-16s%2u%-6s│0x%-43x│\n", ++ " tecte_tag_cahe[", i, "]", ummu->tecte_tag_cache[i]); ++ } ++ monitor_printf(mon, "│%-24s│%-22d%-23d│\n", "usi_virq[EVETQ,GERROR]", ++ ummu->usi_virq[UMMU_USI_VECTOR_EVETQ], ++ ummu->usi_virq[UMMU_USI_VECTOR_GERROR]); ++ QLIST_FOREACH(entry, &ummu->kvtbl, list) { ++ monitor_printf(mon, "│%-24s│eid 0x%-17xtag %-18u│\n", ++ "kvtbl: dst_eid tecte_tag", entry->dst_eid, entry->tecte_tag); ++ } ++ monitor_printf(mon, "│%-24s│fd %-8downed %-4uusers %-6uref %-8u│\n", ++ "UMMUViommu->iommufd", ummu->viommu->iommufd->fd, ++ ummu->viommu->iommufd ? ummu->viommu->iommufd->owned : 0, ++ ummu->viommu->iommufd ? ummu->viommu->iommufd->users : 0, ++ ummu->viommu->iommufd ? ummu->viommu->iommufd->parent.ref : 0); ++ if (ummu->viommu->core) { ++ monitor_printf(mon, "│%-24s│s2_hwpt_id %-12uviommu_id %-12u│\n", ++ " ->core", ummu->viommu->core->s2_hwpt_id, ++ ummu->viommu->core->viommu_id); ++ } else { ++ monitor_printf(mon, "│%-24s│%-45s│\n", ++ " ->core", "IOMMUFDViommu is NULL, viommu not attach yet"); ++ } ++ if (ummu->viommu->s2_hwpt) { ++ monitor_printf(mon, "│%-24s│iommufd %-7uhwpt_id %-7uioas_id %-7u│\n", ++ " ->s2_hwpt", ummu->viommu->s2_hwpt->iommufd->fd, ++ ummu->viommu->s2_hwpt->hwpt_id, ++ ummu->viommu->s2_hwpt->ioas_id); ++ } else { ++ monitor_printf(mon, "│%-24s│%-45s│\n", ++ " ->s2_hwpt", "s2_hwpt is NULL, not attach viommu yet"); ++ } ++ /* UMMUViommu: UMMUDevice device_list info */ ++ QLIST_FOREACH(ummu_dev, &ummu->viommu->device_list, next) { ++ monitor_printf(mon, "│%-12s%-12s│as:name %-37s│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ummu_dev->as.name); ++ monitor_printf(mon, "│%-12s%-12s│idev: devid %-5uioas_id %-6uiommufd %-6u│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ummu_dev->idev->devid, ++ ummu_dev->idev->ioas_id, ummu_dev->idev->iommufd->fd); ++ if (ummu_dev->s1_hwpt) { ++ monitor_printf(mon, "│%-12s%-12s│s1_hwpt: hwpt_id %-10uiommufd %-10u│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ++ ummu_dev->s1_hwpt->hwpt_id, ++ ummu_dev->s1_hwpt->iommufd->fd); ++ } else { ++ monitor_printf(mon, "│%-12s%-12s│%-45s│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ++ "s1_hwpt is NULL, tecte not install yet"); ++ } ++ if (ummu_dev->vdev) { ++ monitor_printf(mon, "│%-12s%-12s│vdev: sid %-7uVdevId %-7uVirtId %-7lu│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ummu_dev->vdev->sid, ++ ummu_dev->vdev->core->vdev_id, ummu_dev->vdev->core->virt_id); ++ } else { ++ monitor_printf(mon, "│%-12s%-12s│%-45s│\n", ++ " ->dev_list", ummu_dev->udev->qdev.id, ++ 
"UMMUVdev is NULL, tecte not install yet"); ++ } ++ ++ cfg = g_hash_table_lookup(ummu->configs, ummu_dev); ++ if (cfg) { ++ monitor_printf(mon, "│+TransCfg %-14s│tct_ptr 0x%-16lxtct_num %-5lufmt %-2lu│\n", ++ ummu_dev->udev->qdev.id, cfg->tct_ptr, cfg->tct_num, cfg->tct_fmt); ++ monitor_printf(mon, "│+TransCfg %-14s│tct_ttba 0x%-16lxtct_sz %-11u│\n", ++ ummu_dev->udev->qdev.id, cfg->tct_ttba, cfg->tct_sz); ++ monitor_printf(mon, "│+TransCfg %-14s│tct_tgs 0x%-16xtecte_tag %-9u│\n", ++ ummu_dev->udev->qdev.id, cfg->tct_tgs, cfg->tecte_tag); ++ } ++ } ++ return; ++} ++ ++static void ub_dev_get_vfio_info(Monitor *mon, UBDevice *udev) ++{ ++ VFIOUBDevice *vdev = VFIO_UB_SAFE(udev); ++ int i; ++ char guid[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ ++ if (!vdev) { ++ return; ++ } ++ monitor_printf(mon, "│%-24s│%-27sfd %-3ddevid 0x%-4x│\n", ++ "VFIOUBDev sysfsdev", vdev->vbasedev.sysfsdev, ++ vdev->vbasedev.fd, vdev->vbasedev.devid); ++ ub_device_get_str_from_guid(&vdev->host.guid, guid, ++ UB_DEV_GUID_STRING_LENGTH + 1); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "VFIOUBDev host", guid); ++ ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ monitor_printf(mon, "│vfioers %-16d│hva %-18pofs 0x%-17lx│\n", ++ i, vdev->ers[i].region.mmaps ? ++ vdev->ers[i].region.mmaps[0].mmap : NULL, ++ vdev->ers[i].region.fd_offset); ++ } ++ if (!vdev->usi || !vdev->usi_vectors) { ++ return; ++ } ++ for (i = 0; i < vdev->usi->vec_table_num; i++) { ++ monitor_printf(mon, "│usi_vectors[%-2d] use=%-4u│virq %-6d " ++ "kvm_int %-1u %-5d interrupt %-1u %-5d│\n", ++ i, vdev->usi_vectors[i].use, vdev->usi_vectors[i].virq, ++ vdev->usi_vectors[i].kvm_interrupt.initialized, ++ vdev->usi_vectors[i].kvm_interrupt.rfd, ++ vdev->usi_vectors[i].interrupt.initialized, ++ vdev->usi_vectors[i].interrupt.rfd); ++ } ++ return; ++} ++ ++int ub_dev_get_detail(Monitor *mon, const char *id) ++{ ++ UBDevice *dev = ub_find_device_by_id(id); ++ char guid[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ /* Column 1 width 24, column 2 width 45 */ ++ g_autofree char *line_c1 = line_generator(24); ++ g_autofree char *line_c2 = line_generator(45); ++ int i; ++ ++ if (!dev) { ++ qemu_log("UB device not found, id %s\n", id); ++ return -1; ++ } ++ if (!line_c1 || !line_c2) { ++ qemu_log("failed to alloc mem %p %p\n", ++ line_c1, line_c2); ++ return -1; ++ } ++ ub_device_get_str_from_guid(&dev->guid, guid, ++ UB_DEV_GUID_STRING_LENGTH + 1); ++ monitor_printf(mon, "┌%s┬%s┐\n", line_c1, line_c2); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "id", dev->qdev.id); ++ monitor_printf(mon, "│%-24s│%-11u%-34s│\n", "dev_type", ++ dev->dev_type, ub_dev_get_type_str(dev->dev_type)); ++ monitor_printf(mon, "│%-24s│%-45p│\n", "config", dev->config); ++ monitor_printf(mon, "│%-24s│0x%-21lx0x%-20lx│\n", "config_size", ++ ub_config_size(), ub_config_size()); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "name", dev->name); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "eid", dev->eid); ++ ub_dev_get_cfg0_info(mon, dev); ++ ub_dev_get_cfg1_info(mon, dev); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "cna", dev->cna); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "ue_idx", dev->ue_idx); ++ monitor_printf(mon, "│%-24s│%-45s│\n", "guid", guid); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "port_num", dev->port.port_num); ++ for (i = 0; i < dev->port.port_num; i++) { ++ if ((dev->port.neighbors + i)->neighbor_dev) { ++ monitor_printf(mon, "│neighbor_info lport %-4u│%-10s rport %-28u│\n", ++ (dev->port.neighbors + i)->local_port_idx, ++ (dev->port.neighbors + i)->neighbor_dev->qdev.id, ++ (dev->port.neighbors + 
i)->neighbor_port_idx); ++ } ++ } ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ monitor_printf(mon, "│io_regions %-13d│gpa 0x%-18lx size 0x%-13lx│\n", ++ i, dev->io_regions[i].addr, dev->io_regions[i].size); ++ } ++ if (dev->dev_type == UB_TYPE_IDEVICE || dev->dev_type == UB_TYPE_DEVICE) { ++ ub_dev_get_vfio_info(mon, dev); ++ } ++ monitor_printf(mon, "│%-24s│%-45s│\n", "canonical_path", dev->qdev.canonical_path); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "realized", dev->qdev.realized); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "pending_del_evt", dev->qdev.pending_deleted_event); ++ monitor_printf(mon, "│%-24s│%-45lu│\n", "pending_del_expr_ms", dev->qdev.pending_deleted_expires_ms); ++ monitor_printf(mon, "│%-24s│%-45d│\n", "hotplugged", dev->qdev.hotplugged); ++ monitor_printf(mon, "│%-24s│%-45d│\n", "allow_unplug_dur_mig", dev->qdev.allow_unplug_during_migration); ++ monitor_printf(mon, "│%-24s│count %-4uhold_pending %-4uexit_progress %-4u│\n", ++ "ResettableState", dev->qdev.reset.count, ++ dev->qdev.reset.hold_phase_pending, ++ dev->qdev.reset.exit_phase_in_progress); ++ monitor_printf(mon, "│%-24s│%-45u│\n", "reset_count", dev->rst_cnt); ++ ub_dev_get_bus_info(mon, dev); ++ ub_dev_get_usi_info(mon, dev); ++ /* ubc info */ ++ if (UB_TYPE_IBUS_CONTROLLER == dev->dev_type) { ++ ub_dev_get_ummu_info(mon, dev); ++ ub_dev_get_ubc_info(mon, dev); ++ } ++ ++ monitor_printf(mon, "└%s┴%s┘", line_c1, line_c2); ++ return 0; ++} +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 9a5d4c6c33..9e52486c0b 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -284,4 +284,9 @@ void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque); + uint32_t ub_dev_get_token_id(UBDevice *udev); + uint32_t ub_dev_get_ueid(UBDevice *udev); + enum UbDeviceType ub_dev_get_type(UBDevice *udev); ++int ub_dev_dump_config(const char *id, uint64_t offset, uint64_t len, ++ char *buff, int buff_size); ++void ub_dev_dump_ers(const char *id, uint8_t idx, uint64_t offset, uint64_t len, ++ char *buff, int buff_size); ++int ub_dev_get_detail(Monitor *mon, const char *id); + #endif +diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h +index 13f9a2dedb..8696774566 100644 +--- a/include/monitor/hmp.h ++++ b/include/monitor/hmp.h +@@ -181,5 +181,10 @@ void hmp_ioport_write(Monitor *mon, const QDict *qdict); + void hmp_boot_set(Monitor *mon, const QDict *qdict); + void hmp_info_mtree(Monitor *mon, const QDict *qdict); + void hmp_info_cryptodev(Monitor *mon, const QDict *qdict); ++#ifdef CONFIG_UB ++void hmp_info_ub(Monitor *mon, const QDict *qdict); ++void hmp_info_ub_config(Monitor *mon, const QDict *qdict); ++void hmp_info_ub_ers(Monitor *mon, const QDict *qdict); ++#endif // CONFIG_UB + + #endif +diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c +index 5bb3c9cd46..535ea3ca95 100644 +--- a/monitor/hmp-cmds.c ++++ b/monitor/hmp-cmds.c +@@ -29,6 +29,13 @@ + #include "hw/intc/intc.h" + #include "qemu/log.h" + #include "sysemu/sysemu.h" ++#ifdef CONFIG_UB ++#include "qapi/qapi-commands-ub.h" ++#include "hw/ub/ub.h" ++#include "hw/vfio/ub.h" ++#include "hw/ub/ub_common.h" ++#include "qemu/units.h" ++#endif // CONFIG_UB + + bool hmp_handle_error(Monitor *mon, Error *err) + { +@@ -444,3 +451,153 @@ void hmp_info_mtree(Monitor *mon, const QDict *qdict) + + mtree_info(flatview, dispatch_tree, owner, disabled); + } ++ ++#ifdef CONFIG_UB ++static void hmp_info_ub_device(Monitor *mon, const UBDeviceInfo *info) ++{ ++ monitor_printf(mon, "%-12s", info->id); ++ monitor_printf(mon, "%-12s", info->name); ++ 
monitor_printf(mon, "%-8"PRId64, info->bi); ++ monitor_printf(mon, "%-8"PRId64, info->eid); ++ monitor_printf(mon, "%-8"PRId64, info->cna); ++ monitor_printf(mon, "%-6"PRId64, info->feidx); ++ monitor_printf(mon, "%-38s", info->guid); ++ monitor_printf(mon, "%-5lu", info->type); ++ monitor_printf(mon, "%-6"PRId64, info->ports); ++ monitor_printf(mon, "%-5ld", info->usis); ++ monitor_printf(mon, "\n"); ++} ++ ++/* Column width 100 */ ++#define UB_HMP_INFO_UB_WIDTH 100 ++void hmp_info_ub(Monitor *mon, const QDict *qdict) ++{ ++ UBInfoList *info_list, *info; ++ const char *id = qdict_get_try_str(qdict, "id"); ++ g_autofree char *line = NULL; ++ int cnt = 0; ++ ++ if (!id) { ++ Error *err = NULL; ++ info_list = qmp_query_ub(&err); ++ if (!info_list || err) { ++ monitor_printf(mon, "UB devices not found\n"); ++ error_free(err); ++ return; ++ } ++ ++ line = line_generator(UB_HMP_INFO_UB_WIDTH); ++ if (!line) { ++ return; ++ } ++ monitor_printf(mon, "%-12s%-12s%-8s%-8s%-8s%-6s%-38s%-5s%-6s%-5s\n" ++ "%s\n", "ID", "Name", "BI", "Eid", "CNA", ++ "FeIdx", "Guid", "Type", "Ports", "USIs", line); ++ for (info = info_list; info; info = info->next) { ++ UBDeviceInfoList *dev; ++ ++ for (dev = info->value->devices; dev; dev = dev->next) { ++ hmp_info_ub_device(mon, dev->value); ++ cnt++; ++ } ++ } ++ ++ if (!cnt) { ++ monitor_printf(mon, "no UB devices found\n"); ++ return; ++ } ++ ++ monitor_printf(mon, "\n(Tips: Specifies the optional parameter [id]" ++ " to display more detailed information.)\n"); ++ qapi_free_UBInfoList(info_list); ++ } else { ++ if (ub_dev_get_detail(mon, id) < 0) { ++ monitor_printf(mon, "UB device not found\n"); ++ return; ++ } ++ } ++} ++ ++#define HMP_DUMP_STR_LEN (65 * KiB) ++#define MAX_BYTE_SUPPORT (32 * KiB) ++void hmp_info_ub_config(Monitor *mon, const QDict *qdict) ++{ ++ const char *id = qdict_get_str(qdict, "id"); ++ uint64_t offset = qdict_get_int(qdict, "offset"); ++ uint64_t len = qdict_get_int(qdict, "len"); ++ g_autofree char *buff = g_malloc0(HMP_DUMP_STR_LEN); ++ if (len > MAX_BYTE_SUPPORT) { ++ monitor_printf(mon, "you can keep length not exceed %ld and try again.\n", ++ MAX_BYTE_SUPPORT); ++ return; ++ } ++ ++ if (ub_dev_dump_config(id, offset, len, buff, HMP_DUMP_STR_LEN) < 0) { ++ monitor_printf(mon, "Failed to dump UB device config space information.\n"); ++ return; ++ } ++ ++ monitor_printf(mon, "%s\n", buff); ++} ++ ++void hmp_info_ub_ers(Monitor *mon, const QDict *qdict) ++{ ++ const char *id = qdict_get_str(qdict, "id"); ++ uint8_t idx = qdict_get_int(qdict, "index"); ++ uint64_t offset = qdict_get_int(qdict, "offset"); ++ uint64_t len = qdict_get_int(qdict, "len"); ++ UBDevice *udev; ++ VFIOUBDevice *vdev = NULL; ++ VFIOERS *ers = NULL; ++ VFIORegion *region = NULL; ++ uint64_t size_total = 0; ++ ++ g_autofree char *buff = g_malloc0(HMP_DUMP_STR_LEN); ++ if (len > MAX_BYTE_SUPPORT) { ++ monitor_printf(mon, "you can keep length not exceed %ld and try again.\n", ++ MAX_BYTE_SUPPORT); ++ return; ++ } ++ ++ if (idx > VFIO_UB_REGION2_INDEX) { ++ monitor_printf(mon, "invalid ers index, must <= %d.\n", ++ VFIO_UB_REGION2_INDEX); ++ return; ++ } ++ ++ udev = ub_find_device_by_id(id); ++ if (!udev) { ++ monitor_printf(mon, "%s not found, please check your input.\n", id); ++ return; ++ } ++ if (udev->dev_type != UB_TYPE_DEVICE && udev->dev_type != UB_TYPE_IDEVICE) { ++ monitor_printf(mon, "not support type: %s\n", ++ ub_dev_get_type_str(udev->dev_type)); ++ return; ++ } ++ vdev = VFIO_UB_SAFE(udev); ++ if (vdev) { ++ int i; ++ ers = &vdev->ers[idx]; ++ region = 
&ers->region; ++ for (i = 0; i < region->nr_mmaps; i++) { ++ size_total += region->mmaps[i].size; ++ } ++ if (size_total != udev->io_regions[idx].size) { ++ monitor_printf(mon, "Some areas in this ers are emulated " ++ "by virtualization.\n"); ++ } ++ } ++ if ((vdev && offset + len > size_total) || ++ (offset + len > udev->io_regions[idx].size)) { ++ monitor_printf(mon, "offset or len invalid, please check your input. " ++ "udev region size 0x%lx, ers region size 0x%lx\n", ++ udev->io_regions[idx].size, size_total); ++ return; ++ } ++ ++ ub_dev_dump_ers(id, idx, offset, len, buff, HMP_DUMP_STR_LEN); ++ ++ monitor_printf(mon, "%s\n", buff); ++} ++#endif // CONFIG_UB +\ No newline at end of file +-- +2.33.0 + diff --git a/ub-code-reinforcement.patch b/ub-code-reinforcement.patch new file mode 100644 index 0000000000000000000000000000000000000000..c00b390c62ddcce0abdde3813e7a314f57f8bb8d --- /dev/null +++ b/ub-code-reinforcement.patch @@ -0,0 +1,124 @@ +From 5817871679ce22c07e1a3586f37dd8bc2ea7348b Mon Sep 17 00:00:00 2001 +From: xiangzixuan +Date: Mon, 10 Nov 2025 17:13:05 +0800 +Subject: [PATCH 1/4] ub: code reinforcement + +code reinforcement + +Signed-off-by: xiangzixuan +--- + hw/arm/virt.c | 6 +++--- + hw/core/qdev-properties-system.c | 2 +- + hw/ub/ub.c | 11 +---------- + hw/ub/ub_config.c | 2 +- + hw/ub/ub_ubc.c | 6 +++--- + 5 files changed, 9 insertions(+), 18 deletions(-) + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index 2c8c8df07c..e2e603a4a0 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -1766,8 +1766,8 @@ static void create_ub(VirtMachineState *vms) + } + + ubc = qdev_new(TYPE_BUS_CONTROLLER); +- qdev_prop_set_uint32(ubc, "ub-bus-controller-msgq-reg-size", UBC_MSGQ_REG_SIZE); +- qdev_prop_set_uint32(ubc, "ub-bus-controller-fm-msgq-reg-size", FM_MSGQ_REG_SIZE); ++ qdev_prop_set_uint32(ubc, "ub-msgq-reg-size", UBC_MSGQ_REG_SIZE); ++ qdev_prop_set_uint32(ubc, "ub-fm-msgq-reg-size", FM_MSGQ_REG_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ubc), &error_fatal); + + /* in ub_bus_controller_realize will call sysbus_init_mmio init memory_region in order, +@@ -1801,7 +1801,7 @@ static void create_ub(VirtMachineState *vms) + ummu = qdev_new(TYPE_UB_UMMU); + ubc_state = BUS_CONTROLLER(ubc); + object_property_set_link(OBJECT(ummu), "primary-bus", OBJECT(ubc_state->bus), &error_abort); +- /* default set ummu nestd */ ++ /* default set ummu nested */ + object_property_set_bool(OBJECT(ummu), "nested", true, &error_abort); + qdev_prop_set_uint64(ummu, "ub-ummu-reg-size", UMMU_REG_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ummu), &error_fatal); +diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c +index 4643ef1fbd..a6a76c8956 100644 +--- a/hw/core/qdev-properties-system.c ++++ b/hw/core/qdev-properties-system.c +@@ -1435,7 +1435,7 @@ static void ub_dev_set_port_num(Object *obj, Visitor *v, const char *name, + return; + } + if ((port_num <= 0) || (port_num > UB_DEV_MAX_NUM_OF_PORT)) { +- error_setg(&local_err, "illegal port num: %d, set port num bettwen 1 to %u", ++ error_setg(&local_err, "illegal port num: %d, set port num between 1 to %u", + port_num, UB_DEV_MAX_NUM_OF_PORT); + error_propagate(errp, local_err); + } +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 9eb5729f20..66e34ebb34 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -358,21 +358,12 @@ static void ub_init_wmask(UBDevice *ub_dev) + cfg1_int_type2_wmask->interrupt_mask = ~0; + cfg1_int_type2_wmask->interrupt_enable = ~0; + +- /* port basic */ +- // set after port_info is initialized +- +- 
/* port cap */ +- // not support yet +- + /* route table */ + emulated_offset = ub_cfg_offset_to_emulated_offset(UB_ROUTE_TABLE_START, true); + route_table_wmask = (UbRouteTable *)(ub_dev->wmask + emulated_offset); + memset(route_table_wmask, 0xff, UB_CFG_SLICE_SIZE); + route_table_wmask->entry_num = 0; + route_table_wmask->ers = 0; +- +- /* route table entry */ +- // not support yet + } + + static void ub_init_w1cmask(UBDevice *ub_dev) +@@ -427,7 +418,7 @@ static uint64_t ub_er_address(UBDevice *dev, uint8_t ers, uint64_t size) + UbCfg1Basic *cfg1_basic; + uint64_t emulated_offset; + +- if (ers > UB_NUM_REGIONS) { ++ if (ers >= UB_NUM_REGIONS) { + qemu_log("invalid ers %u\n", ers); + return UB_ER_UNMAPPED; + } +diff --git a/hw/ub/ub_config.c b/hw/ub/ub_config.c +index 48598a0230..971984432f 100644 +--- a/hw/ub/ub_config.c ++++ b/hw/ub/ub_config.c +@@ -125,8 +125,8 @@ uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success) + if (emulate_offset == UINT64_MAX) { + ub_cfg_display_addr_map_table(); + qemu_log("failed to convert offset 0x%lx to emulated offset\n", offset); ++ assert(emulate_offset != UINT64_MAX); + } +- assert(emulate_offset != UINT64_MAX); + } + + return emulate_offset; +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index 0fc5255c05..ce8201d256 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -177,11 +177,11 @@ static bool ub_bus_controller_needed(void *opaque) + } + + static Property ub_bus_controller_properties[] = { +- DEFINE_PROP_UINT32("ub-bus-controller-msgq-reg-size", BusControllerState, ++ DEFINE_PROP_UINT32("ub-msgq-reg-size", BusControllerState, + msgq_reg_size, 0), +- DEFINE_PROP_UINT32("ub-bus-controller-fm-msgq-reg-size", BusControllerState, ++ DEFINE_PROP_UINT32("ub-fm-msgq-reg-size", BusControllerState, + fm_msgq_reg_size, 0), +- DEFINE_PROP_BOOL("ub-bus-controller-migration-enabled", BusControllerState, ++ DEFINE_PROP_BOOL("ub-migration-enabled", BusControllerState, + mig_enabled, true), + DEFINE_PROP_END_OF_LIST(), + }; +-- +2.33.0 + diff --git a/ub-config-introduce-ub-config-base-framework.patch b/ub-config-introduce-ub-config-base-framework.patch new file mode 100644 index 0000000000000000000000000000000000000000..19f12c54e7630956c5ed437d7c1094e7304ee031 --- /dev/null +++ b/ub-config-introduce-ub-config-base-framework.patch @@ -0,0 +1,1121 @@ +From 0e07810c35d4aa44af20423327e7ac50965f05cd Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 16:33:16 +0800 +Subject: [PATCH 1/5] ub config: introduce ub config base framework +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1、add ub config space realize required struct +2、add some ub config space item define according to ub spce +3、add memmap table for ub config addr convert + +Signed-off-by: caojinhuahw +--- + hw/arm/virt.c | 6 + + hw/ub/meson.build | 1 + + hw/ub/ub_config.c | 133 +++++++++ + include/hw/ub/hisi/ubc.h | 215 ++++++++++++++ + include/hw/ub/ub_common.h | 98 +++++++ + include/hw/ub/ub_config.h | 579 ++++++++++++++++++++++++++++++++++++++ + 6 files changed, 1032 insertions(+) + create mode 100644 hw/ub/ub_config.c + create mode 100644 include/hw/ub/ub_config.h + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index a27d3b5fc3..470a320bc6 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -98,6 +98,7 @@ + #include "hw/ub/hisi/ub_fm.h" + #include "hw/ub/ub_ummu.h" + #include "hw/ub/ub_common.h" ++#include "hw/ub/ub_config.h" + #endif // CONFIG_UB + + #define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ +@@ 
-1757,6 +1758,11 @@ static void create_ub(VirtMachineState *vms) + MemoryRegion *mmio_reg; + MemoryRegion *mmio_alias; + ++ if (ub_cfg_addr_map_table_init() < 0) { ++ qemu_log("failed to init ub cfg addr map table\n"); ++ exit(1); ++ } ++ + ubc = qdev_new(TYPE_BUS_CONTROLLER); + qdev_prop_set_uint32(ubc, "ub-bus-controller-msgq-reg-size", UBC_MSGQ_REG_SIZE); + qdev_prop_set_uint32(ubc, "ub-bus-controller-fm-msgq-reg-size", FM_MSGQ_REG_SIZE); +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index e1146704e6..ffa135dacf 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -2,6 +2,7 @@ ub_ss = ss.source_set() + ub_ss.add(files( + 'ub.c', + 'ub_ubc.c', ++ 'ub_config.c', + 'ub_acpi.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) +diff --git a/hw/ub/ub_config.c b/hw/ub/ub_config.c +new file mode 100644 +index 0000000000..32ae6b91e4 +--- /dev/null ++++ b/hw/ub/ub_config.c +@@ -0,0 +1,133 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include "qemu/osdep.h" ++#include "qemu/module.h" ++#include "hw/arm/virt.h" ++#include "hw/qdev-properties.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_config.h" ++#include "qemu/log.h" ++#include "migration/vmstate.h" ++#include "qapi/error.h" ++ ++UbCfgAddrMapEntry *g_ub_cfg_addr_map_table = NULL; ++uint32_t g_emulated_ub_cfg_size; ++ ++uint64_t ub_cfg_slice_start_offset[UB_CFG_EMULATED_SLICES_NUM] = { ++ [CFG0_BASIC] = 0x0, ++ [CAP1_RSV] = 0x100, ++ [CAP2_SHP] = 0x200, ++ [CAP3_ERR_RECORD] = 0x300, ++ [CAP4_ERR_INFO] = 0x400, ++ [CAP5_EMQ] = 0x500, ++ [CFG1_BASIC] = 0x10000, ++ [CAP1_DECODER] = 0x10100, ++ [CAP2_JETTY] = 0x10200, ++ [CAP3_INT_TYPE1] = 0x10300, ++ [CAP4_INT_TYPE2] = 0x10400, ++ [CAP5_RSV] = 0x10500, ++ [CAP6_UB_MEM] = 0x10600, ++ [CFG0_PORT_BASIC] = 0x20000, ++ [CFG0_ROUTE_TABLE] = 0xF0000000, ++}; ++ ++static void ub_cfg_display_addr_map_table(void) ++{ ++ int i; ++ ++ for (i = 0; i < UB_CFG_SLICE_NUMS; i++) { ++ qemu_log("map_table[%d]---start_addr: 0x%lx, mapped_offset: 0x%lx\n", i, ++ g_ub_cfg_addr_map_table[i].start_addr, g_ub_cfg_addr_map_table[i].mapped_offset); ++ } ++} ++ ++int ub_cfg_addr_map_table_init(void) ++{ ++ int i, idx; ++ ++ /* used in all qemu lifecycle, be freed when qemu exit */ ++ g_ub_cfg_addr_map_table = malloc(UB_CFG_SLICE_NUMS * sizeof(UbCfgAddrMapEntry)); ++ if (!g_ub_cfg_addr_map_table) { ++ qemu_log("failed to malloc for g_ub_cfg_addr_map_table\n"); ++ return -1; ++ } ++ ++ /* fill general slice map table */ ++ for (i = 0; i < UB_CFG_GENERAL_SLICES_NUM; i++) { ++ g_ub_cfg_addr_map_table[i].start_addr = ub_cfg_slice_start_offset[i]; ++ g_ub_cfg_addr_map_table[i].start_addr *= UB_CFG_START_OFFSET_GRANU; ++ g_ub_cfg_addr_map_table[i].mapped_offset = i * UB_CFG_SLICE_SIZE; ++ } ++ ++ /* fill port info slice map table */ ++ for (i = 0; i < 
UB_DEV_MAX_NUM_OF_PORT; i++) { ++ idx = UB_CFG_GENERAL_SLICES_NUM + i; ++ g_ub_cfg_addr_map_table[idx].start_addr = ub_cfg_slice_start_offset[CFG0_PORT_BASIC]; ++ g_ub_cfg_addr_map_table[idx].start_addr *= UB_CFG_START_OFFSET_GRANU; ++ g_ub_cfg_addr_map_table[idx].start_addr += i * UB_PORT_SZ; ++ g_ub_cfg_addr_map_table[idx].mapped_offset = idx * UB_CFG_SLICE_SIZE; ++ } ++ ++ /* fill route table slice map table */ ++ idx = UB_CFG_GENERAL_SLICES_NUM + UB_DEV_MAX_NUM_OF_PORT; ++ g_ub_cfg_addr_map_table[idx].start_addr = ub_cfg_slice_start_offset[CFG0_ROUTE_TABLE]; ++ g_ub_cfg_addr_map_table[idx].start_addr *= UB_CFG_START_OFFSET_GRANU; ++ g_ub_cfg_addr_map_table[idx].mapped_offset = idx * UB_CFG_SLICE_SIZE; ++ ++ g_emulated_ub_cfg_size = UB_CFG_SLICE_NUMS * UB_CFG_SLICE_SIZE; ++ qemu_log("each ub-dev emulated ub cfg size is 0x%x bytes\n", g_emulated_ub_cfg_size); ++ ++ return 0; ++} ++ ++uint32_t ub_emulated_config_size(void) ++{ ++ return g_emulated_ub_cfg_size; ++} ++ ++uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success) ++{ ++ uint64_t emulate_offset = UINT64_MAX; ++ int i; ++ uint64_t diff; ++ ++ for (i = 0; i < UB_CFG_SLICE_NUMS; i++) { ++ if (offset < g_ub_cfg_addr_map_table[i].start_addr) { ++ break; ++ } ++ ++ diff = offset - g_ub_cfg_addr_map_table[i].start_addr; ++ if (diff >= UB_CFG_SLICE_SIZE) { ++ continue; ++ } ++ ++ emulate_offset = g_ub_cfg_addr_map_table[i].mapped_offset + diff; ++ break; ++ } ++ ++ if (check_success) { ++ if (emulate_offset == UINT64_MAX) { ++ ub_cfg_display_addr_map_table(); ++ qemu_log("failed to convert offset 0x%lx to emulated offset\n", offset); ++ } ++ assert(emulate_offset != UINT64_MAX); ++ } ++ ++ return emulate_offset; ++} +\ No newline at end of file +diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h +index c34693accb..f9201741a9 100644 +--- a/include/hw/ub/hisi/ubc.h ++++ b/include/hw/ub/hisi/ubc.h +@@ -184,4 +184,219 @@ + #define HI_MSGQ_MAX_DEPTH 1024 + #define HI_MSGQ_MIN_DEPTH 4 + ++/* ++ * msgq sq memory layout ++ * +----------------------------+ ++ * | sqe 1 | ++ * |----------------------------| 12Byte ++ * +----| payload addr(offset) | ++ * | +----------------------------+ ++ * | | sqe 2 | ++ * | |----------------------------| 12Byte ++ * +-------| payload addr(offset) | ++ * | | +----------------------------+ ++ * | | | ..... | ++ * | | | | ++ * | | +----------------------------+ ++ * | | | sqe (depth) | ++ * | | |----------------------------| 12Byte ++ * +---------| payload addr(offset) | ++ * | | | +----------------------------+ ++ * | | +--> | payload 1 | 1K ++ * | | +----------------------------+ ++ * | +-----> | payload 2 | 1K ++ * | +----------------------------+ ++ * | | ....... 
| 1K ++ * | +----------------------------+ ++ * +-------> | payload (depth) | 1K ++ * +----------------------------+ ++ * ++ * SQE layout ++ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ ++ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| ++ * +-----------------------------------------------+-----------------------+-----------+-----------+ ++ * | payload length | msg id |submsg code| msg code | ++ * +-----------------------------------------------+-----+--+--+-----------+-----------+-----------+ ++ * | rsvd |e1|e2| vl | rsvd | e1:icrc e2:local ++ * +-----------------------------------------------------+--+--+-----------+-----------------------+ ++ * | payload addr | ++ * +-----------------------------------------------------------------------------------------------+ ++ */ ++typedef struct HiMsgSqe { ++ /* DW0 */ ++ uint32_t task_type : 2; ++ uint32_t rsvd0 : 2; ++ uint32_t local : 1; ++ uint32_t dev_type : 2; ++ uint32_t icrc : 1; ++ union { ++ struct { ++ uint8_t type : 1; ++ uint8_t msg_code : 3; ++ uint8_t sub_msg_code : 4; ++ }; ++ uint8_t opcode; ++ }; ++ uint32_t p_len : 12; ++ uint32_t rsvd1 : 4; ++ ++ /* DW1 */ ++ uint32_t msn : 16; ++ uint32_t rsvd3 : 16; ++ ++ /* DW2 */ ++ uint32_t p_addr; ++ ++ /* DW3 */ ++ uint32_t rsvd2; ++} HiMsgSqe; ++#define HI_MSG_SQE_SIZE sizeof(HiMsgSqe) ++ ++typedef struct HiMsgCqe { ++ /* DW0 */ ++ uint32_t task_type : 2; ++ uint32_t rsvd0 : 6; ++ union { ++ struct { ++ uint8_t type : 1; ++ uint8_t msg_code : 3; ++ uint8_t sub_msg_code : 4; ++ }; ++ uint8_t opcode; ++ }; ++ uint32_t p_len : 12; ++ uint32_t rsvd1 : 4; ++ ++ /* DW1 */ ++ uint32_t msn : 16; ++ uint32_t rsvd5 : 16; ++ ++ /* DW2 */ ++ uint32_t rq_pi : 10; ++ uint32_t rsvd2 : 6; ++ uint32_t status : 8; ++ uint32_t rsvd3 : 8; ++ ++ /* DW3 */ ++ uint32_t rsvd4; ++} HiMsgCqe; ++#define HI_MSG_CQE_SIZE sizeof(HiMsgCqe) ++ ++typedef struct HiMsgSqePld { ++ char packet[HI_MSG_SQE_PLD_SIZE]; ++} HiMsgSqePld; ++ ++typedef struct HiMsgqInfo { ++ uint64_t sq_base_addr_gpa; ++ uint64_t sq_base_addr_hva; ++ uint64_t sq_sz; ++ uint64_t cq_base_addr_gpa; ++ uint64_t cq_base_addr_hva; ++ uint64_t cq_sz; ++ uint64_t rq_base_addr_gpa; ++ uint64_t rq_base_addr_hva; ++ uint64_t rq_sz; ++} HiMsgqInfo; ++ ++typedef enum HiMsgqIdx { ++ MSG_SQ = 0, ++ MSG_RQ = 1, ++ MSG_CQ = 2, ++ MSGQ_NUM ++} HiMsgqIdx_t; ++ ++enum HiCqeStatus { ++ CQE_SUCCESS, ++ CQE_FAIL ++}; ++ ++enum HiCqSwState { ++ CQ_SW_INIT, ++ CQ_SW_HANDLED ++}; ++ ++struct HiMsgQueue { ++ HiMsgqIdx_t idx; ++ ++ union { ++ struct HiMsgSqe *sqe; ++ void *rqe; ++ struct HiMsgCqe *cqe; ++ void *entry; ++ }; ++ ++ uint16_t entry_size; ++ uint8_t depth; ++ uint8_t ci; ++ uint8_t pi; ++ ++ pthread_spinlock_t lock; ++}; ++ ++#define UB_MSG_CODE_ENUM 0x8 /* hisi private */ ++enum HiEnumSubMsgCode { ++ ENUM_QUERY_REQ = 0, ++ ENUM_QUERY_RSP, ++ CNA_CFG_REQ, ++ CNA_CFG_RSP ++}; ++ ++enum UB_MSG_RSP_STATUS_CODE { ++ UB_MSG_RSP_SUCCESS, ++ UB_MSG_RSP_INVALID_MESSAGE, ++ UB_MSG_RSP_UPI_BEYOND_AUTH, ++ UB_MSG_RSP_INVALID_TOKEN, ++ UB_MSG_RSP_REG_ATTR_MISMATCH, ++ UB_MSG_RSP_INVALID_ADDR, ++ UB_MSG_RSP_HW_EXEC_FAILED, ++ UB_MSG_RSP_LACK_OF_EID, ++}; ++ ++enum HiTaskType { ++ PROTOCOL_MSG = 0, ++ PROTOCOL_ENUM = 1, ++ HISI_PRIVATE = 2 ++}; ++ ++typedef enum HiMsgqPrivateOpcode { ++ CC_CTX_CFG_CMD = 0, ++ QUERY_UB_MEM_ROUTE_CMD = 1, ++ EU_TABLE_CFG_CMD = 2, ++ CC_CTX_QUERY_CMD = 3 ++} HiMsgqPrivateOpcode; ++ ++typedef enum HiEuCfgStatus { ++ EU_CFG_FAIL, ++ 
EU_CFG_SUCCESS ++} HiEuCfgStatus; ++ ++typedef struct HiEuCfgReq { ++ uint32_t eu_msg_code : 4; ++ uint32_t cfg_entry_num : 10; ++ uint32_t tbl_cfg_mode : 1; ++ uint32_t tbl_cfg_status : 1; ++ uint32_t entry_start_id : 16; ++ uint32_t eid : 20; ++ uint32_t rsv0 : 12; ++ uint32_t upi : 16; ++ uint32_t rsv1 : 16; ++} HiEuCfgReq; ++#define HI_EU_CFG_REQ_SIZE 12 ++ ++typedef struct HiEuCfgRsp { ++ uint32_t eu_msg_code : 4; ++ uint32_t cfg_entry_num : 10; ++ uint32_t tbl_cfg_mode : 1; ++ uint32_t tbl_cfg_status : 1; ++ uint32_t entry_start_id : 16; ++} HiEuCfgRsp; ++#define HI_EU_CFG_RSP_SIZE 4 ++ ++typedef struct HiEuCfgPld { ++ union { ++ HiEuCfgReq req; ++ HiEuCfgRsp rsp; ++ }; ++} HiEuCfgPld; ++ + #endif +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +index 1336ea3ed3..b8a0287e56 100644 +--- a/include/hw/ub/ub_common.h ++++ b/include/hw/ub/ub_common.h +@@ -314,5 +314,103 @@ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + #define DASH_SZ 3 ++/* The caller is responsible for free memory. */ ++char *line_generator(uint8_t len); ++enum UbMsgType { ++ MSG_REQ = 0, ++ MSG_RSP = 1 ++}; ++ ++enum UbMsgCode { ++ UB_MSG_CODE_RAS = 0, ++ UB_MSG_CODE_LINK = 1, ++ UB_MSG_CODE_CFG = 2, ++ UB_MSG_CODE_VDM = 3, ++ UB_MSG_CODE_EXCH = 4, ++ UB_MSG_CODE_SEC = 5, ++ UB_MSG_CODE_POOL = 6, ++ UB_MSG_CODE_MAX = 7 ++}; ++ ++struct UbLinkHeader { ++ uint32_t plen : 14; ++ uint32_t rm : 2; ++ uint32_t cfg : 4; ++ uint32_t rsvd1 : 1; ++ uint32_t vl : 4; ++ uint32_t rsvd0 : 1; ++ uint32_t crd_vl : 4; ++ uint32_t ack : 1; ++ uint32_t crd : 1; ++}; ++#define UB_CLAN_LINK_CFG 6 ++ ++struct ClanNetworkHeader { ++ /* DW0 */ ++ uint32_t dcna : 16; ++ uint32_t scna : 16; ++ /* DW1 */ ++#define NTH_NLP_WITH_TPH 0 ++#define NTH_NLP_WITHOUT_TPH 1 ++ uint32_t nth_nlp : 3; ++ uint32_t mgmt : 1; ++ uint32_t sl : 4; ++ uint32_t lb : 8; ++ uint32_t cc : 16; ++}; ++ ++typedef struct MsgExtendedHeader { ++ uint32_t plen : 12; ++ uint32_t rsvd : 4; ++ uint32_t rsp_status : 8; ++ union { ++ struct { ++ uint8_t type : 1; ++ uint8_t msg_code : 3; ++ uint8_t sub_msg_code : 4; ++ }; ++ uint8_t code; ++ }; ++} MsgExtendedHeader; ++ ++typedef struct MsgPktHeader { /* TODO, check byte order */ ++ /* DW0 */ ++ struct UbLinkHeader ulh; ++ /* DW1-DW2 */ ++ struct ClanNetworkHeader nth; ++ /* DW3 */ ++ uint32_t seid_h : 8; ++ uint32_t upi : 16; ++#define CTPH_NLP_UPI_40BITS_UEID 2 ++ uint32_t ctph_nlp : 4; /* tp header */ ++ uint32_t pad : 2; ++#define CTPH_OPCODE_NOT_CNP 0 ++ uint32_t tp_opcode : 2; ++ /* DW4 */ ++ uint32_t deid : 20; ++ uint32_t seid_l : 12; ++ /* DW5 */ ++ uint32_t src_tassn : 16; ++ uint32_t taver : 3; ++ uint32_t tk_vld : 1; ++ uint32_t udf : 4; ++#define TAH_OPCODE_MSG 0x14 ++ uint32_t ta_opcode : 8; ++ /* DW6 */ ++ uint32_t sjetty : 20; ++ uint32_t sjt_type : 2; ++ uint32_t rsv0 : 3; ++ uint32_t retry : 1; ++ uint32_t se : 1; ++ uint32_t jetty_en : 1; ++ uint32_t rsv1 : 1; ++ uint32_t odr : 3; ++ /* DW7 */ ++ struct MsgExtendedHeader msgetah; ++ ++ /* DW8~DW11 */ ++ char payload[0]; /* payload */ ++} MsgPktHeader; ++#define MSG_PKT_HEADER_SIZE 32 + + #endif +diff --git a/include/hw/ub/ub_config.h b/include/hw/ub/ub_config.h +new file mode 100644 +index 0000000000..05b2c19c57 +--- /dev/null ++++ b/include/hw/ub/ub_config.h +@@ -0,0 +1,579 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_CONFIG_H ++#define UB_CONFIG_H ++ ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/ub_common.h" ++#include "hw/ub/ub.h" ++#include "qemu/units.h" ++ ++enum UbCfgEmulatedSlice { ++ CFG0_BASIC = 0, ++ /* CFG0_CAP START */ ++ CAP1_RSV, ++ CAP2_SHP, ++ CAP3_ERR_RECORD, ++ CAP4_ERR_INFO, ++ CAP5_EMQ, ++ /* CFG0_CAP END */ ++ CFG1_BASIC, ++ /* CFG1_CAP START */ ++ CAP1_DECODER, ++ CAP2_JETTY, ++ CAP3_INT_TYPE1, ++ CAP4_INT_TYPE2, ++ CAP5_RSV, ++ CAP6_UB_MEM, ++ /* CFG1_CAP END */ ++ UB_CFG_GENERAL_SLICES_NUM, ++ ++ /* dont add new here */ ++ CFG0_PORT_BASIC, ++ CFG0_ROUTE_TABLE, ++ UB_CFG_EMULATED_SLICES_NUM, ++ /* dont add new here */ ++}; ++ ++/* In UB spec, route table slice is 1GB, in virtualization, ++ * route table is not used. To redece mem overhead, the route ++ * table also emulated with 1k slice, so add 1 extra */ ++#define UB_CFG_SLICE_NUMS (UB_CFG_GENERAL_SLICES_NUM + UB_DEV_MAX_NUM_OF_PORT + 1) ++#define UB_CFG_START_OFFSET_GRANU 4 ++#define UB_CFG_SLICE_SIZE (1 * KiB) ++ ++typedef struct UbCfgAddrMapEntry { ++ uint64_t start_addr; ++ uint64_t mapped_offset; ++} UbCfgAddrMapEntry; ++ ++int ub_cfg_addr_map_table_init(void); ++ ++enum UbCfgSubMsgCode { ++ UB_CFG0_READ = 0, ++ UB_CFG0_WRITE = 1, ++ UB_CFG1_READ = 2, ++ UB_CFG1_WRITE = 3, ++ UB_CFG_MAX_SUB_MSG_CODE, ++}; ++ ++typedef struct CfgMsgPldReq { ++ /* DW0 */ ++ uint32_t rsvd0 : 4; ++ uint32_t byte_enable : 4; ++ uint32_t rsvd1 : 8; ++ uint32_t entity_idx : 16; ++ ++ /* DW1 */ ++ uint32_t req_addr; ++ ++ /* DW2 */ ++ uint32_t rsvd2; ++ /* DW3 */ ++ uint32_t write_data; ++} CfgMsgPldReq; ++ ++typedef struct CfgMsgPldRsp { ++ /* DW0 */ ++ uint32_t read_data; ++ /* DW1 */ ++ uint32_t rsvd1; ++ /* DW2 */ ++ uint32_t rsvd2; ++ /* DW3 */ ++ uint32_t rsvd3; ++} CfgMsgPldRsp; ++ ++typedef struct CfgMsgPld { ++ union { ++ CfgMsgPldReq req; ++ CfgMsgPldRsp rsp; ++ }; ++} CfgMsgPld; ++#define CFG_MSG_PLD_SIZE 16 ++#define MSG_CFG_PKT_SIZE (MSG_PKT_HEADER_SIZE + CFG_MSG_PLD_SIZE) /* header 32bytes, pld 16bytes */ ++ ++void handle_msg_cfg(void *opaque, HiMsgSqe *sqe, void *payload); ++enum UbCfgBlockType { ++ UB_CFG0_BASIC_BLOCK_TYPE = 0, ++ UB_CFG_ROUTING_BLOCK_TYPE = 1, ++ UB_CFG_CAP_BLOCK_TYPE = 2, ++ UB_CFG_PORT_BLOCK_TYPE = 3, ++ UB_CFG_VD_BLOCK_TYPE = 4, ++ UB_CFG1_BASIC_BLOCK_TYPE = 5, ++ UB_CFG_BLOCK_NUMS ++}; ++ ++typedef struct CfgMsgPkt { ++ MsgPktHeader header; ++ CfgMsgPld pld; ++} CfgMsgPkt; ++ ++typedef struct __attribute__ ((__packed__)) ConfigNetAddrInfo { ++ uint32_t primary_cna : 24; /* 0x1A */ ++ uint32_t rsv : 8; ++ uint32_t rsv1; /* 0x1B */ ++ uint32_t rsv2; /* 0x1C */ ++ uint32_t rsv3; /* 0x1D */ ++ uint32_t rsv4; /* 0x1E */ ++} ConfigNetAddrInfo; ++ ++typedef struct SliceHeader { ++ uint32_t slice_version : 4; ++ uint32_t slice_used_size : 28; ++} SliceHeader; ++ ++typedef struct __attribute__ ((__packed__)) Cfg0SupportFeature { ++ union { ++ uint32_t rsv[4]; ++ struct 
{ ++ uint8_t entity_available : 1; ++ uint8_t mtu_supported : 3; ++ uint8_t route_table_supported : 1; ++ uint8_t upi_supported : 1; ++ uint8_t broker_supported : 1; ++ uint8_t switch_supported : 1; ++ uint8_t rsv : 1; ++ uint8_t cc_supported : 1; ++ } bits; ++ }; ++} Cfg0SupportFeature; ++ ++typedef struct __attribute__ ((__packed__)) UbEid { ++ uint32_t dw0; ++ uint32_t dw1; ++ uint32_t dw2; ++ uint32_t dw3; ++} UbEid; ++ ++#define CAP_BITMAP_LEN 32 ++#define RSV_LEN 4 ++typedef struct __attribute__ ((__packed__)) UbCfg0Basic { ++ /* dw0 */ ++ SliceHeader header; // RO ++ /* dw1 */ ++ uint16_t total_num_of_port; // RO ++ uint16_t total_num_of_ue; // RO ++ /* dw2~dw9 */ ++ uint8_t cap_bitmap[CAP_BITMAP_LEN]; // RO ++ /* dw10~dw14 */ ++ Cfg0SupportFeature support_feature; // RO ++ /* dw14~dw17 */ ++ UbGuid guid; // RO ++ /* dw18~dw21 */ ++ UbEid eid; // RW ++ /* dw22~dw25 */ ++ UbEid fm_eid; ++ /* dw26~dw30 */ ++ ConfigNetAddrInfo net_addr_info; // RW ++ /* dw31~dw44 */ ++ uint32_t upi : 16; // RW ++ uint32_t rsv1 : 16; ++ uint32_t module_id : 16; // HwInit ++ uint32_t vendor_id : 16; ++ uint32_t dev_rst : 1; // RW ++ uint32_t rsv3 : 31; ++ uint32_t rsv4; ++ uint32_t mtu_cfg : 3; // RW ++ uint32_t rsv5 : 29; ++ uint32_t cc_en : 1; // RW ++ uint32_t rsv6 : 31; ++ uint32_t th_en : 1; // RW ++ uint32_t rsv7 : 31; ++ uint32_t fm_cna : 24; // RW ++ uint32_t rsv8 : 8; ++ uint64_t ueid_low; // RW ++ uint64_t ueid_high; // RW ++ uint32_t ucna : 24; // RW ++ uint32_t rsv9 : 8; ++ uint32_t rsv10; ++} UbCfg0Basic; ++ ++typedef struct __attribute__ ((__packed__)) UbSlotInfo { ++ /* dw2 */ ++ uint8_t pps : 1; ++ uint8_t wlps : 1; ++ uint8_t plps : 1; ++ uint8_t pdss : 1; ++ uint8_t pwcs : 1; ++ uint32_t rsv1 : 27; ++ /* dw3 */ ++ uint16_t start_port_idx; ++ uint16_t end_port_idx; ++ /* dw4~dw10 */ ++ uint8_t pp_ctrl : 1; ++ uint32_t rsv2 : 31; ++ uint8_t wl_ctrl : 2; ++ uint32_t rsv3 : 30; ++ uint8_t pl_ctrl : 2; ++ uint32_t rsv4 : 30; ++ uint8_t ms_ctrl : 1; ++ uint32_t rsv5 : 31; ++ uint8_t pd_ctrl : 1; ++ uint32_t rsv6 : 31; ++ uint8_t pds_ctrl : 1; ++ uint32_t rsv7 : 31; ++ uint8_t pw_ctrl : 1; ++ uint32_t rsv8 : 31; ++ /* dw11~dw13 */ ++ uint8_t pp_st : 1; ++ uint32_t rsv9 : 31; ++ uint8_t pd_st : 1; ++ uint32_t rsv10 : 31; ++ uint8_t pdsc_st : 1; ++ uint32_t rsv11 : 31; ++ /* dw14~dw17 */ ++ uint32_t rsv[2]; ++} UbSlotInfo; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg0ShpCap { ++ /* dw0 */ ++ SliceHeader header; // RO ++ /* dw1 */ ++ uint16_t slot_num; // RO ++ uint16_t rsv1; ++ /* dw2 ~ */ ++ UbSlotInfo slot_info[0]; // RO ++} UbCfg0ShpCap; ++ ++typedef struct __attribute__ ((__packed__)) ErrorMsgQueCtrl { ++ uint64_t correctable_err_report_enable : 1; ++ uint64_t uncorrectable_nonfatal_err_report_enable : 1; ++ uint64_t uncorrectable_fatal_err_report_enable : 1; ++ uint64_t rsv_1 : 5; ++ uint64_t interrupt_generation_enable : 1 ; ++ uint64_t rsv_2 : 55; ++} ErrorMsgQueCtrl; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg0EmqCap { ++ /* dw0 */ ++ uint64_t segment_header; ++ ErrorMsgQueCtrl error_msg_que_ctrlr; ++} UbCfg0EmqCap; ++ ++typedef struct __attribute__ ((__packed__)) Cfg1SupportFeature { ++ union { ++ uint32_t rsv[4]; ++ struct { ++ uint8_t rsv1 : 2; ++ uint8_t mgs : 1; ++ uint8_t rsv2 : 2; ++ uint8_t ubbas : 1; ++ uint8_t ers0s : 1; ++ uint8_t ers1s : 1; ++ uint8_t ers2s : 1; ++ uint8_t cdmas : 1; ++ uint8_t matt_juris : 1; ++ } bits; ++ }; ++} Cfg1SupportFeature; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg1DecoderCap { ++ /* dw0 */ ++ SliceHeader header; 
++#define DECODER_CAP_EVENT_SIZE 5 ++#define DECODER_CAP_CMD_SIZE 5 ++#define DECODER_CAP_MMIO_SIZE 7 ++ /* dw1 */ ++ struct { ++ uint16_t rsv1 : 4; ++ uint16_t event_size_sup : 4; ++ uint16_t rsv2 : 4; ++ uint16_t cmd_size_sup : 4; ++ uint16_t mmio_size_sup : 3; ++ uint16_t rsv3 : 13; ++ } decoder; ++ /* dw2 */ ++ struct { ++ uint32_t decoder_en : 1; ++ uint32_t rsv : 31; ++ } decoder_ctrl; ++ /* dw3-4 */ ++ uint64_t dec_matt_ba; ++ /* dw5-6 */ ++ uint64_t dec_mmio_ba; ++ /* dw7 */ ++ uint32_t dev_usi_idx; ++ /* dw 8-0xf */ ++#define DECODER_CAP_RESERVED1_BYTES 8 ++ uint32_t rsv1[DECODER_CAP_RESERVED1_BYTES]; ++ /* dw 0x10 */ ++ struct { ++ uint32_t cmdq_en : 1; ++ uint32_t rsv1 : 7; ++ uint32_t cmdq_size_use : 4; ++ uint32_t rsv2 : 20; ++ } decoder_cmdq_cfg; ++ /* dw 0x11 */ ++ struct { ++ uint32_t cmdq_wr_idx : 11; ++ uint32_t rsv1 : 5; ++ uint32_t cmdq_err_resp : 1; ++ uint32_t rsv2 : 15; ++ } decoder_cmdq_prod; ++ /* dw 0x12 */ ++ struct { ++ uint32_t cmdq_rd_idx : 11; ++ uint32_t rsv1 : 5; ++ uint32_t cmdq_err : 1; ++ uint32_t cmdq_err_res : 3; ++ uint32_t rsv2 : 12; ++ } decoder_cmdq_cons; ++ /* dw 0x13-0x14 */ ++ struct { ++ uint64_t rsv1 : 6; ++ uint64_t cmdq_ba : 42; ++ uint64_t rsv2 : 16; ++ } decoder_cmdq_ba; ++ /* dw 0x15-0x1f */ ++#define DECODER_CAP_RESERVED2_BYTES 11 ++ uint32_t rsv2[DECODER_CAP_RESERVED2_BYTES]; ++ /* dw 0x20 */ ++ struct { ++ uint32_t evtq_en : 1; ++ uint32_t rsv1 : 7; ++ uint32_t evtq_size_use : 4; ++ uint32_t rsv2 : 20; ++ } decoder_evtq_cfg; ++ /* dw 0x21 */ ++ struct { ++ uint32_t evtq_wr_idx : 11; ++ uint32_t rsv : 20; ++ uint32_t evtq_ovrl_err : 1; ++ } decoder_evtq_prod; ++ /* dw 0x22 */ ++ struct { ++ uint32_t evtq_rd_idx : 11; ++ uint32_t rsv : 20; ++ uint32_t evtq_ovrl_err_resp : 1; ++ } decoder_evtq_cons; ++ /* dw 0x23 */ ++ struct { ++ uint64_t rsv1 : 6; ++ uint64_t evtq_ba : 42; ++ uint64_t rsv2 : 16; ++ } decoder_evtq_ba; ++} UbCfg1DecoderCap; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg1IntType1Cap { ++ /* dw0 */ ++ SliceHeader header; ++ /* dw1 */ ++ uint32_t interrupt_enable : 1; ++ uint32_t rsv1 : 31; ++ /* dw2 */ ++ uint32_t support_int_num : 3; ++ uint32_t rsv2 : 29; ++ /* dw3 */ ++ uint32_t interrupt_enable_num : 3; ++ uint32_t rsv3 : 29; ++ /* dw4 */ ++ uint32_t interrupt_data; ++ /* dw5-dw6 */ ++ uint64_t interrupt_address; ++ /* dw7 */ ++ uint32_t interrupt_id; ++ /* dw8 */ ++ uint32_t interrupt_mask; ++ /* dw9 */ ++ uint32_t interrupt_pending; ++} UbCfg1IntType1Cap; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg1IntType2Cap { ++ /* dw0 */ ++ SliceHeader header; ++ /* dw1 */ ++ uint16_t vec_table_num; ++ uint16_t add_table_num; ++ /* dw2 ~ dw8 */ ++ uint64_t vec_table_start_addr; ++ uint64_t add_table_start_addr; ++ uint64_t pend_table_start_addr; ++ uint32_t interrupt_id; ++ uint32_t interrupt_mask : 1; ++ uint32_t rsv1 : 31; ++ uint32_t interrupt_enable : 1; ++ uint32_t rsv2 : 31; ++} UbCfg1IntType2Cap; ++ ++typedef struct __attribute__ ((__packed__)) UbCfg1Basic { ++ /* dw0 */ ++ SliceHeader header; // RO ++ /* dw1~dw8 */ ++ uint8_t cap_bitmap[CAP_BITMAP_LEN]; // RO ++ /* dw9~dw12 */ ++ Cfg1SupportFeature support_feature; // RO ++ /* dw13~dw42 */ ++ uint32_t ers_space_size[UB_NUM_REGIONS]; ++ uint64_t ers_start_addr[UB_NUM_REGIONS]; ++ uint64_t ers_ubba[UB_NUM_REGIONS]; ++ uint32_t elr : 1; ++ uint32_t rsv1 : 31; ++ uint32_t elr_done : 1; ++ uint32_t rsv2 : 31; ++ uint32_t mig_ctrl : 8; ++ uint32_t rsv3 : 24; ++ uint32_t mig_status : 8; ++ uint32_t rsv4 : 24; ++ uint32_t ers_att : 3; ++ uint32_t rsv5 : 29; ++ 
uint32_t sys_pgs : 1; ++ uint32_t rsv6 : 31; ++ uint64_t eid_upi_tab; ++ uint32_t eid_upi_ten; ++ uint64_t rsv7; ++ uint64_t rsv8; ++ uint32_t class_code : 16; ++ uint32_t rsv9 : 16; ++ uint32_t tpid_num : 16; ++ uint32_t rsv10 : 16; ++ uint32_t ctp_tb_bypass : 1; ++ uint32_t rsv11 : 31; ++ uint32_t crystal_dma_en : 1; ++ uint32_t rsv12 : 31; ++ uint32_t dev_token_id : 20; ++ uint32_t rsv13 : 12; ++ uint32_t bus_access_en : 1; ++ uint32_t rsv14 : 31; ++ uint32_t dev_rs_access_en : 1; ++ uint32_t rsv15 : 31; ++} UbCfg1Basic; ++ ++typedef struct __attribute__ ((__packed__)) ConfigPortInfo { ++ uint16_t port_idx : 16; ++ uint8_t port_type : 1; ++ uint8_t enum_boundary : 1; ++ uint16_t rsv : 14; ++} ConfigPortInfo; ++ ++typedef struct __attribute__ ((__packed__)) ConfigNeighborPortInfo { ++ uint16_t neighbor_port_idx : 16; ++ uint16_t rsv : 16; ++ UbGuid neighbot_port_guid; ++} ConfigNeighborPortInfo; ++ ++#define PORT_CAP_BITMAP_LEN 32 ++typedef struct __attribute__ ((__packed__)) ConfigPortBasic { ++ SliceHeader header; ++ uint8_t port_cap_bitmap[PORT_CAP_BITMAP_LEN]; ++ ConfigPortInfo port_info; ++ ConfigNeighborPortInfo neighbor_port_info; ++ uint32_t port_cna : 24; ++ uint32_t rsv1 : 8; ++ uint8_t port_reset : 1; ++ uint32_t rsv2 : 31; ++} ConfigPortBasic; ++ ++typedef struct __attribute__ ((__packed__)) UbRouteTable { ++ SliceHeader header; ++ uint32_t entry_num : 16; ++ uint32_t ers : 1; ++ uint32_t rsv1 : 15; ++ uint32_t er_en : 1; ++ uint32_t rsv2 : 31; ++ uint32_t entry[0]; ++} UbRouteTable; ++ ++#define SUPPORTED 1 ++#define NOT_SUPPORTED 0 ++ ++#define UBFM 1 ++#define UB_DRIVE 0 ++ ++/* slice header default value, unit (4 bytes) */ ++#define UB_SLICE_VERSION 0x0 ++#define UB_CFG0_BASIC_SLICE_USED_SIZE 0x24 ++#define UB_CFG1_BASIC_SLICE_USED_SIZE 0x20 ++#define UB_PORT_BASIC_SLICE_USED_SIZE 0x11 ++ ++/* ub dev cap */ ++#define BITS_PER_CAP_BIT_MAP 128 ++#define CFG0_RSV_INDEX 1 ++#define CFG0_CAP2_SHP_INDEX 2 ++#define CFG1_DECODER_CAP_INDEX 1 ++#define CFG1_JETTY_CAP_INDEX 2 ++#define CFG1_INT_CAP_INDEX 3 ++ ++/* ub dev config space CFG0 addr offset, unit (bytes) */ ++#define UB_SLICE_SZ (0x00000100 * DWORD_SIZE) ++#define UB_CFG0_BASIC_START 0x00000000 ++#define UB_CFG0_BASIC_CAP_BITMAP (UB_CFG0_BASIC_START + 0x02 * DWORD_SIZE) ++#define UB_CFG0_BASIC_GUID_START (UB_CFG0_BASIC_START + 0x0E * DWORD_SIZE) ++#define UB_CFG0_BASIC_NA_INFO_START (UB_CFG0_BASIC_START + 0x1A * DWORD_SIZE) ++#define UB_CFG0_DEV_UEID_OFFSET (UB_CFG0_BASIC_START + 0x27 * DWORD_SIZE) ++#define UB_CFG0_CAP1_RSV_START (UB_CFG0_BASIC_START + UB_SLICE_SZ) ++#define UB_CFG0_CAP2_SHP_START (UB_CFG0_CAP1_RSV_START + UB_SLICE_SZ) ++#define UB_CFG0_CAP3_ERR_RECORD_START (UB_CFG0_CAP2_SHP_START + UB_SLICE_SZ) ++#define UB_CFG0_CAP4_ERR_INFO_START (UB_CFG0_CAP3_ERR_RECORD_START + UB_SLICE_SZ) ++#define UB_CFG0_EMQ_CAP_START (UB_CFG0_CAP4_ERR_INFO_START + UB_SLICE_SZ) ++/* ub dev config space CFG1 addr offset, unit (bytes) */ ++#define UB_CFG1_BASIC_START (0x00010000 * DWORD_SIZE) ++#define UB_CFG1_CAP1_DECODER (UB_CFG1_BASIC_START + UB_SLICE_SZ) ++#define UB_CFG1_CAP2_JETTY (UB_CFG1_CAP1_DECODER + UB_SLICE_SZ) ++#define UB_CFG1_CAP3_INT_TYPE1 (UB_CFG1_CAP2_JETTY + UB_SLICE_SZ) ++#define UB_CFG1_CAP4_INT_TYPE2 (UB_CFG1_CAP3_INT_TYPE1 + UB_SLICE_SZ) ++#define UB_CFG1_CAP5_RSV (UB_CFG1_CAP4_INT_TYPE2 + UB_SLICE_SZ) ++#define UB_CFG1_CAP6_UB_MEM (UB_CFG1_CAP5_RSV + UB_SLICE_SZ) ++/* ub dev config space PORT addr offset, unit (bytes) */ ++#define UB_PORT_SLICE_START (0x00020000 * DWORD_SIZE) ++#define UB_PORT_SZ 
(0x00010000 * DWORD_SIZE)
++/* ub dev config space ROUTE TABLE addr offset, unit (bytes) */
++#define UB_ROUTE_TABLE_START (0xF0000000ULL * DWORD_SIZE)
++#define UB_ROUTE_TABLE_SIZE (0x10000000 * DWORD_SIZE)
++/* ub dev config space CFG1 system page granule size define */
++#define UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_4K (4 * 1024)
++#define UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_64K (64 * 1024)
++/* ub dev config space CFG1 dev_token_id offset 0xB4 */
++#define UB_CFG1_DEV_TOKEN_ID_OFFSET (UB_CFG1_BASIC_START + 0x2D * DWORD_SIZE)
++#define UB_TOKEN_ID_MASK 0xfffff
++/* ub dev config space CFG1 dev_rs_access_en offset 0xBC */
++#define UB_CFG1_DEV_RS_ACCESS_EN_OFFSET (UB_CFG1_BASIC_START + 0x2F * DWORD_SIZE)
++#define UB_DEV_RS_ACCESS_EN_MASK 0x1
++/* ub dev config space CFG1 bus_access_en offset 0xB8 */
++#define UB_CFG1_BUS_ACCESS_EN_OFFSET (UB_CFG1_BASIC_START + 0x2E * DWORD_SIZE)
++#define UB_BUS_ACCESS_EN_MASK 0x1
++/* ub dev config space INT TYPE2 CAP addr offset, unit (bytes) */
++#define UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_VEC_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 1 * DWORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_ADDR_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 1 * DWORD_SIZE + WORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_INT_VEC_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 2 * DWORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_INT_ADDR_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 4 * DWORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_INT_PENDING_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 6 * DWORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_INT_ID_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 8 * DWORD_SIZE)
++#define UB_CFG1_CAP4_INT_TYPE2_INT_MASK_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 9 * DWORD_SIZE)
++/* ub dev usi vec&addr&pend table entry size, unit (bytes) */
++#define USI_VEC_TABLE_ENTRY_SIZE 0x8
++#define USI_ADDR_TABLE_ENTRY_SIZE 0x20
++#define USI_PEND_TABLE_ENTRY_SIZE 0x4
++#define USI_PEND_TABLE_ENTRY_BIT_NUM 32
++/* ub dev usi addr table valid bit offset */
++#define USI_ADDR_TABLE_VALID_BIT_OFFSET 10
++#define USI_ADDR_TABLE_VALID_BIT_MASK 0x10
++/* usi config space */
++#define UB_CFG1_CAP4_INT_TYPE2_MASK_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 0x24)
++#define UB_CFG1_CAP4_INT_TYPE2_MASKBIT 0x1
++#define UB_CFG1_CAP4_INT_TYPE2_ENABLE_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 0x28)
++#define UB_CFG1_CAP4_INT_TYPE2_ENABLEBIT 0x1
++/* usi vec table source */
++#define USI_VEC_TABLE_MASK_OFFSET 0x6
++#define USI_VEC_TABLE_MASKBIT 0x1
++#define USI_VEC_TABLE_ADDR_INDEX_OFFSET 0x4
++
++uint32_t ub_emulated_config_size(void);
++uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success);
++
++#endif
+\ No newline at end of file
+--
+2.33.0
+
diff --git a/ub-do-more-realize-for-ub-device.patch b/ub-do-more-realize-for-ub-device.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c8586fbf5e66778ed040819bfc64588c4057bd00
--- /dev/null
+++ b/ub-do-more-realize-for-ub-device.patch
@@ -0,0 +1,345 @@
+From 4fe54a9d146bfb4cf2085ffac980accdbb8328a4 Mon Sep 17 00:00:00 2001
+From: caojinhuahw
+Date: Tue, 11 Nov 2025 17:18:49 +0800
+Subject: [PATCH 2/5] ub: do more realize for ub device
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+1. realize default config space read/write
+2. support ub device registration
+3. init default wmask for ub device
+
+Signed-off-by: caojinhuahw
+---
+ hw/ub/ub.c         | 273 ++++++++++++++++++++++++++++++++++++++++++++-
+ include/hw/ub/ub.h |   6 +
+ 2 files changed, 277 insertions(+), 2 deletions(-)
+
+diff --git a/hw/ub/ub.c b/hw/ub/ub.c
+index 974df5d0f7..b6503c62e2 100644
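+
+(Illustration only, not part of the patch: how the wmask/w1cmask pair used by
+ub_default_write_config below filters a guest config-space write. Bits set in
+wmask are guest-writable; bits set in w1cmask are cleared when the guest
+writes 1. The register layout in this standalone sketch is made up for the
+example.)
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    static uint32_t masked_write(uint32_t cur, uint32_t val,
+                                 uint32_t wmask, uint32_t w1cmask)
+    {
+        uint32_t next = (cur & ~wmask) | (val & wmask); /* writable bits */
+        next &= ~(val & w1cmask);                       /* write-1-to-clear bits */
+        return next;
+    }
+
+    int main(void)
+    {
+        /* bit0 read-write, bit1 write-1-to-clear */
+        printf("0x%x\n", masked_write(0x2, 0x3, 0x1, 0x2)); /* prints 0x1 */
+        return 0;
+    }
+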
+--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -24,6 +24,7 @@ + #include "hw/qdev-properties-system.h" + #include "hw/ub/ub_common.h" + #include "hw/ub/ub.h" ++#include "hw/ub/ub_config.h" + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" + #include "qemu/log.h" +@@ -117,13 +118,250 @@ static const TypeInfo ub_bus_info = { + .class_init = ub_bus_class_init, + }; + ++static void ub_config_alloc(UBDevice *ub_dev) ++{ ++ size_t config_size = ub_emulated_config_size(); ++ ub_dev->config = g_malloc0(config_size); ++ ub_dev->wmask = g_malloc0(config_size); ++ ub_dev->w1cmask = g_malloc0(config_size); ++} ++ ++static void ub_port_info_alloc(UBDevice *ub_dev) ++{ ++ ub_dev->port.neighbors = g_malloc0(sizeof(NeighborInfo) * ++ ub_dev->port.port_num); ++ ub_dev->port.port_info_exist = false; ++} ++ ++static void ub_config_free(UBDevice *ub_dev) ++{ ++ g_free(ub_dev->config); ++ g_free(ub_dev->wmask); ++ g_free(ub_dev->w1cmask); ++} ++ ++static void ub_port_info_free(UBDevice *ub_dev) ++{ ++ if (ub_dev->port.neighbors_cmd) { ++ g_free(ub_dev->port.neighbors_cmd); ++ } ++ if (ub_dev->port.neighbors) { ++ g_free(ub_dev->port.neighbors); ++ } ++} ++ ++static void ub_config_set_guid(UBDevice *ub_dev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_GUID_START, true); ++ uint8_t *ub_config_guid_ptr = ub_dev->config + offset; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ ++ ub_device_get_str_from_guid(&ub_dev->guid, guid_str, ++ UB_DEV_GUID_STRING_LENGTH + 1); ++ memcpy(ub_config_guid_ptr, &ub_dev->guid, sizeof(UbGuid)); ++} ++ ++static void ub_init_wmask(UBDevice *ub_dev) ++{ ++ UbCfg0Basic *cfg0_basic_wmask; ++ UbCfg0EmqCap *cfg0_emq_cap_wmask; ++ UbCfg1Basic *cfg1_basic_wmask; ++ UbCfg1IntType1Cap *cfg1_int_type1_wmask; ++ UbCfg1IntType2Cap *cfg1_int_type2_wmask; ++ UbRouteTable *route_table_wmask; ++ uint64_t emulated_offset; ++ ++ /* cfg0 basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ cfg0_basic_wmask = (UbCfg0Basic *)(ub_dev->wmask + emulated_offset); ++ memset(cfg0_basic_wmask, 0, sizeof(UbCfg0Basic)); ++ memset(&cfg0_basic_wmask->eid, 0xff, sizeof(UbEid)); ++ memset(&cfg0_basic_wmask->fm_eid, 0xff, sizeof(UbEid)); ++ cfg0_basic_wmask->net_addr_info.primary_cna = 0xffffff; ++ cfg0_basic_wmask->upi = ~0; ++ cfg0_basic_wmask->dev_rst = ~0; ++ cfg0_basic_wmask->mtu_cfg = ~0; ++ cfg0_basic_wmask->cc_en = ~0; ++ cfg0_basic_wmask->th_en = ~0; ++ cfg0_basic_wmask->fm_cna = ~0; ++ cfg0_basic_wmask->ueid_low = ~0UL; ++ cfg0_basic_wmask->ueid_high = ~0UL; ++ cfg0_basic_wmask->ucna = ~0; ++ ++ /* cfg0 emq cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_EMQ_CAP_START, true); ++ cfg0_emq_cap_wmask = (UbCfg0EmqCap *)(ub_dev->wmask + emulated_offset); ++ memset(cfg0_emq_cap_wmask, 0, sizeof(UbCfg0EmqCap)); ++ cfg0_emq_cap_wmask->error_msg_que_ctrlr.correctable_err_report_enable = ~0; ++ cfg0_emq_cap_wmask->error_msg_que_ctrlr.uncorrectable_nonfatal_err_report_enable = ~0; ++ cfg0_emq_cap_wmask->error_msg_que_ctrlr.uncorrectable_fatal_err_report_enable = ~0; ++ cfg0_emq_cap_wmask->error_msg_que_ctrlr.interrupt_generation_enable = ~0; ++ ++ /* cfg1 basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic_wmask = (UbCfg1Basic *)(ub_dev->wmask + emulated_offset); ++ memset(cfg1_basic_wmask, 0, sizeof(UbCfg1Basic)); ++ cfg1_basic_wmask->elr = ~0; ++ cfg1_basic_wmask->mig_ctrl = ~0; ++ cfg1_basic_wmask->sys_pgs = ~0; ++ cfg1_basic_wmask->eid_upi_tab = ~0UL; ++ 
cfg1_basic_wmask->ctp_tb_bypass = ~0; ++ cfg1_basic_wmask->crystal_dma_en = ~0; ++ cfg1_basic_wmask->dev_token_id = ~0; ++ cfg1_basic_wmask->bus_access_en = ~0; ++ cfg1_basic_wmask->dev_rs_access_en = ~0; ++ ++ /* cfg1 int type1 cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP3_INT_TYPE1, true); ++ cfg1_int_type1_wmask = (UbCfg1IntType1Cap *)(ub_dev->wmask + emulated_offset); ++ memset(cfg1_int_type1_wmask, 0, sizeof(UbCfg1IntType1Cap)); ++ cfg1_int_type1_wmask->interrupt_enable = ~0; ++ cfg1_int_type1_wmask->interrupt_enable_num = ~0; ++ cfg1_int_type1_wmask->interrupt_data = ~0U; ++ cfg1_int_type1_wmask->interrupt_address = ~0UL; ++ cfg1_int_type1_wmask->interrupt_id = ~0U; ++ cfg1_int_type1_wmask->interrupt_mask = ~0U; ++ ++ /* cfg1 int type2 cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true); ++ cfg1_int_type2_wmask = (UbCfg1IntType2Cap *)(ub_dev->wmask + emulated_offset); ++ memset(cfg1_int_type2_wmask, 0, sizeof(UbCfg1IntType2Cap)); ++ cfg1_int_type2_wmask->interrupt_id = ~0U; ++ cfg1_int_type2_wmask->interrupt_mask = ~0; ++ cfg1_int_type2_wmask->interrupt_enable = ~0; ++ ++ /* port basic */ ++ // set after port_info is initialized ++ ++ /* port cap */ ++ // not support yet ++ ++ /* route table */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_ROUTE_TABLE_START, true); ++ route_table_wmask = (UbRouteTable *)(ub_dev->wmask + emulated_offset); ++ memset(route_table_wmask, 0xff, UB_CFG_SLICE_SIZE); ++ route_table_wmask->entry_num = 0; ++ route_table_wmask->ers = 0; ++ ++ /* route table entry */ ++ // not support yet ++} ++ ++static void ub_init_w1cmask(UBDevice *ub_dev) ++{ ++ UbCfg0Basic *cfg0_basic_w1cmask; ++ UbCfg1Basic *cfg1_basic_w1cmask; ++ uint64_t emulated_offset; ++ ++ /* cfg0 basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ cfg0_basic_w1cmask = (UbCfg0Basic *)(ub_dev->w1cmask + emulated_offset); ++ memset(cfg0_basic_w1cmask, 0, sizeof(UbCfg0Basic)); ++ cfg0_basic_w1cmask->dev_rst = ~0; ++ ++ /* cfg1 basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic_w1cmask = (UbCfg1Basic *)(ub_dev->w1cmask + emulated_offset); ++ memset(cfg1_basic_w1cmask, 0, sizeof(UbCfg1Basic)); ++ cfg1_basic_w1cmask->elr = ~0; ++ ++ /* port cap */ ++ // not support yet ++} ++ ++static void ub_config_space_init(UBDevice *ub_dev) ++{ ++ ub_config_set_guid(ub_dev); ++ ub_init_wmask(ub_dev); ++ ub_init_w1cmask(ub_dev); ++} ++ ++void ub_default_read_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask) ++{ ++ uint32_t read_data; ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(offset, false); ++ ++ if (emulated_offset == UINT64_MAX) { ++ *val = 0; ++ qemu_log("ub default read config out of emulated range, offset " ++ "is 0x%lx\n", offset); ++ return; ++ } ++ ++ memcpy(&read_data, dev->config + emulated_offset, DWORD_SIZE); ++ *val = read_data & dw_mask; ++} ++ ++void ub_default_write_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask) ++{ ++ uint32_t write_data = *val; ++ uint32_t dw_wmask, dw_w1cmask; ++ uint64_t emulated_offset; ++ uint32_t *dst_data = NULL; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, false); ++ if (emulated_offset == UINT64_MAX) { ++ qemu_log("ub default write config out of emulated range, offset " ++ "is 0x%lx\n", offset); ++ return; ++ } ++ ++ dst_data = (uint32_t *)(dev->config + emulated_offset); ++ dw_wmask = *(uint32_t *)(dev->wmask + 
emulated_offset) & dw_mask; ++ dw_w1cmask = *(uint32_t *)(dev->w1cmask + emulated_offset) & dw_mask; ++ *dst_data = (*dst_data & ~dw_wmask) | (write_data & dw_wmask); ++ *dst_data &= ~(write_data & dw_w1cmask); ++} ++ + static UBDevice *do_ub_register_device(UBDevice *ub_dev, const char *name, Error **errp) + { +- return NULL; ++ UBBus *bus = ub_get_bus(ub_dev); ++ UBDeviceClass *uc = UB_DEVICE_GET_CLASS(ub_dev); ++ UBConfigReadFunc *config_read = uc->config_read; ++ UBConfigWriteFunc *config_write = uc->config_write; ++ ++ if (ub_dev->eid < UB_SUPPORT_MIN_EID || ub_dev->eid > UB_SUPPORT_MAX_EID) { ++ qemu_log("expect eid val is [0x%x, 0x%x], but current eid val is 0x%x\n", ++ UB_SUPPORT_MIN_EID, UB_SUPPORT_MAX_EID, ub_dev->eid); ++ error_setg(errp, "expect eid val is [0x%x, 0x%x], but current eid val is 0x%x\n", ++ UB_SUPPORT_MIN_EID, UB_SUPPORT_MAX_EID, ub_dev->eid); ++ return NULL; ++ } ++ if (ub_find_device_by_guid(&ub_dev->guid)) { ++ qemu_log("%s guid already exists.\n", ub_dev->qdev.id); ++ error_setg(errp, "%s guid already exists.\n", ub_dev->qdev.id); ++ return NULL; ++ } ++ if (ub_find_device_by_eid(bus, ub_dev->eid)) { ++ qemu_log("%s eid already exists.\n", ub_dev->qdev.id); ++ error_setg(errp, "%s eid already exists.\n", ub_dev->qdev.id); ++ return NULL; ++ } ++ pstrcpy(ub_dev->name, sizeof(ub_dev->name), name); ++ QLIST_INSERT_HEAD(&bus->devices, ub_dev, node); ++ ++ /* allocate memory for ub device config space */ ++ ub_config_alloc(ub_dev); ++ ub_config_space_init(ub_dev); ++ /* allocate memory for ub device port info */ ++ ub_port_info_alloc(ub_dev); ++ ++ if (!config_read) ++ config_read = ub_default_read_config; ++ if (!config_write) ++ config_write = ub_default_write_config; ++ ub_dev->config_read = config_read; ++ ub_dev->config_write = config_write; ++ ++ return ub_dev; + } + + static void do_ub_unregister_device(UBDevice *ub_dev) + { ++ ub_config_free(ub_dev); ++ ub_port_info_free(ub_dev); + } + + static void ub_qdev_realize(DeviceState *qdev, Error **errp) +@@ -168,6 +406,19 @@ static Property ub_props[] = { + DEFINE_PROP_END_OF_LIST() + }; + ++UBDevice *ub_find_device_by_eid(UBBus *bus, uint32_t eid) ++{ ++ UBDevice *dev; ++ ++ QLIST_FOREACH(dev, &bus->devices, node) { ++ if (dev->eid == eid) { ++ return dev; ++ } ++ } ++ ++ return NULL; ++} ++ + static void ub_device_class_init(ObjectClass *klass, void *data) + { + DeviceClass *k = DEVICE_CLASS(klass); +@@ -262,4 +513,22 @@ BusControllerState *container_of_ubbus(UBBus *bus) + } + + return NULL; +-} +\ No newline at end of file ++} ++ ++UBDevice *ub_find_device_by_guid(UbGuid *guid) ++{ ++ BusControllerState *ubc = NULL; ++ UBDevice *dev = NULL; ++ ++ QLIST_FOREACH(ubc, &ub_bus_controllers, node) { ++ if (!ubc->bus->qbus.num_children) { ++ continue; ++ } ++ QLIST_FOREACH(dev, &ubc->bus->devices, node) { ++ if (dev && !memcmp(guid, &dev->guid, sizeof(UbGuid))) { ++ return dev; ++ } ++ } ++ } ++ return NULL; ++} +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 5cb6b2b207..2f408d874d 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -221,4 +221,10 @@ static inline uint64_t ub_get_quad(const uint8_t *config) + return ldq_le_p(config); + } + ++void ub_default_read_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask); ++void ub_default_write_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask); ++UBDevice *ub_find_device_by_guid(UbGuid *guid); ++ + #endif +-- +2.33.0 + diff --git a/ub-extract-common-mmu-translate.patch 
b/ub-extract-common-mmu-translate.patch new file mode 100644 index 0000000000000000000000000000000000000000..3fe17ca2e0754950a470909b3f7548269a6db478 --- /dev/null +++ b/ub-extract-common-mmu-translate.patch @@ -0,0 +1,329 @@ +From 6d1410494ee43de801af48c85133379471019955 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Wed, 12 Nov 2025 17:51:13 +0800 +Subject: [PATCH 7/7] ub: extract common mmu translate + +extract mmu translate code to common file, this prepare for later ummu +realize + +Signed-off-by: caojinhuahw +--- + hw/arm/smmu-common.c | 35 ------- + hw/arm/smmu-internal.h | 77 -------------- + include/hw/arm/mmu-translate-common.h | 141 ++++++++++++++++++++++++++ + include/hw/arm/smmu-common.h | 11 +- + 4 files changed, 142 insertions(+), 122 deletions(-) + create mode 100644 include/hw/arm/mmu-translate-common.h + +diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c +index 6c4b82757f..65314e0e44 100644 +--- a/hw/arm/smmu-common.c ++++ b/hw/arm/smmu-common.c +@@ -248,41 +248,6 @@ static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte, + return 0; + } + +-/* VMSAv8-64 Translation Table Format Descriptor Decoding */ +- +-/** +- * get_page_pte_address - returns the L3 descriptor output address, +- * ie. the page frame +- * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format +- */ +-static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz) +-{ +- return PTE_ADDRESS(pte, granule_sz); +-} +- +-/** +- * get_table_pte_address - return table descriptor output address, +- * ie. address of next level table +- * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats +- */ +-static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz) +-{ +- return PTE_ADDRESS(pte, granule_sz); +-} +- +-/** +- * get_block_pte_address - return block descriptor output address and block size +- * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats +- */ +-static inline hwaddr get_block_pte_address(uint64_t pte, int level, +- int granule_sz, uint64_t *bsz) +-{ +- int n = level_shift(level, granule_sz); +- +- *bsz = 1ULL << n; +- return PTE_ADDRESS(pte, n); +-} +- + SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) + { + bool tbi = extract64(iova, 55, 1) ? 
TBI1(cfg->tbi) : TBI0(cfg->tbi); +diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h +index 5a81dd1b82..d95fdeb224 100644 +--- a/hw/arm/smmu-internal.h ++++ b/hw/arm/smmu-internal.h +@@ -24,83 +24,6 @@ + #define TBI0(tbi) ((tbi) & 0x1) + #define TBI1(tbi) ((tbi) & 0x2 >> 1) + +-/* PTE Manipulation */ +- +-#define ARM_LPAE_PTE_TYPE_SHIFT 0 +-#define ARM_LPAE_PTE_TYPE_MASK 0x3 +- +-#define ARM_LPAE_PTE_TYPE_BLOCK 1 +-#define ARM_LPAE_PTE_TYPE_TABLE 3 +- +-#define ARM_LPAE_L3_PTE_TYPE_RESERVED 1 +-#define ARM_LPAE_L3_PTE_TYPE_PAGE 3 +- +-#define ARM_LPAE_PTE_VALID (1 << 0) +- +-#define PTE_ADDRESS(pte, shift) \ +- (extract64(pte, shift, 47 - shift + 1) << shift) +- +-#define is_invalid_pte(pte) (!(pte & ARM_LPAE_PTE_VALID)) +- +-#define is_reserved_pte(pte, level) \ +- ((level == 3) && \ +- ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_RESERVED)) +- +-#define is_block_pte(pte, level) \ +- ((level < 3) && \ +- ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_BLOCK)) +- +-#define is_table_pte(pte, level) \ +- ((level < 3) && \ +- ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_TABLE)) +- +-#define is_page_pte(pte, level) \ +- ((level == 3) && \ +- ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_PAGE)) +- +-/* access permissions */ +- +-#define PTE_AP(pte) \ +- (extract64(pte, 6, 2)) +- +-#define PTE_APTABLE(pte) \ +- (extract64(pte, 61, 2)) +- +-#define PTE_AF(pte) \ +- (extract64(pte, 10, 1)) +-/* +- * TODO: At the moment all transactions are considered as privileged (EL1) +- * as IOMMU translation callback does not pass user/priv attributes. +- */ +-#define is_permission_fault(ap, perm) \ +- (((perm) & IOMMU_WO) && ((ap) & 0x2)) +- +-#define is_permission_fault_s2(s2ap, perm) \ +- (!(((s2ap) & (perm)) == (perm))) +- +-#define PTE_AP_TO_PERM(ap) \ +- (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) +- +-/* Level Indexing */ +- +-static inline int level_shift(int level, int granule_sz) +-{ +- return granule_sz + (3 - level) * (granule_sz - 3); +-} +- +-static inline uint64_t level_page_mask(int level, int granule_sz) +-{ +- return ~(MAKE_64BIT_MASK(0, level_shift(level, granule_sz))); +-} +- +-static inline +-uint64_t iova_level_offset(uint64_t iova, int inputsize, +- int level, int gsz) +-{ +- return ((iova & MAKE_64BIT_MASK(0, inputsize)) >> level_shift(level, gsz)) & +- MAKE_64BIT_MASK(0, gsz - 3); +-} +- + /* FEAT_LPA2 and FEAT_TTST are not implemented. */ + static inline int get_start_level(int sl0 , int granule_sz) + { +diff --git a/include/hw/arm/mmu-translate-common.h b/include/hw/arm/mmu-translate-common.h +new file mode 100644 +index 0000000000..21f324417c +--- /dev/null ++++ b/include/hw/arm/mmu-translate-common.h +@@ -0,0 +1,141 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#ifndef MMU_TRANSLATE_COMMON_H ++#define MMU_TRANSLATE_COMMON_H ++ ++/* VMSAv8-64 Translation constants and functions */ ++#define VMSA_LEVELS 4 ++#define VMSA_MAX_S2_CONCAT 16 ++ ++#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1) ++#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \ ++ (VMSA_LEVELS - (lvl))) ++#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \ ++ VMSA_BIT_LVL(isz, strd, lvl)) - 1) ++ ++/* PTE Manipulation */ ++#define ARM_LPAE_PTE_TYPE_SHIFT 0 ++#define ARM_LPAE_PTE_TYPE_MASK 0x3 ++ ++#define ARM_LPAE_PTE_TYPE_BLOCK 1 ++#define ARM_LPAE_PTE_TYPE_TABLE 3 ++ ++#define ARM_LPAE_L3_PTE_TYPE_RESERVED 1 ++#define ARM_LPAE_L3_PTE_TYPE_PAGE 3 ++ ++#define ARM_LPAE_PTE_VALID (1 << 0) ++ ++#define PTE_ADDRESS(pte, shift) \ ++ (extract64(pte, shift, 47 - shift + 1) << shift) ++ ++#define is_invalid_pte(pte) (!(pte & ARM_LPAE_PTE_VALID)) ++ ++#define is_reserved_pte(pte, level) \ ++ ((level == 3) && \ ++ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_RESERVED)) ++ ++#define is_block_pte(pte, level) \ ++ ((level < 3) && \ ++ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_BLOCK)) ++ ++#define is_table_pte(pte, level) \ ++ ((level < 3) && \ ++ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_TABLE)) ++ ++#define is_page_pte(pte, level) \ ++ ((level == 3) && \ ++ ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_PAGE)) ++ ++/* access permissions */ ++ ++#define PTE_AP(pte) \ ++ (extract64(pte, 6, 2)) ++ ++#define PTE_APTABLE(pte) \ ++ (extract64(pte, 61, 2)) ++ ++#define PTE_AF(pte) \ ++ (extract64(pte, 10, 1)) ++/* ++ * TODO: At the moment all transactions are considered as privileged (EL1) ++ * as IOMMU translation callback does not pass user/priv attributes. ++ */ ++#define is_permission_fault(ap, perm) \ ++ (((perm) & IOMMU_WO) && ((ap) & 0x2)) ++ ++#define is_permission_fault_s2(s2ap, perm) \ ++ (!(((s2ap) & (perm)) == (perm))) ++ ++#define PTE_AP_TO_PERM(ap) \ ++ (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) ++ ++/* Level Indexing */ ++ ++static inline int level_shift(int level, int granule_sz) ++{ ++ return granule_sz + (3 - level) * (granule_sz - 3); ++} ++ ++static inline uint64_t level_page_mask(int level, int granule_sz) ++{ ++ return ~(MAKE_64BIT_MASK(0, level_shift(level, granule_sz))); ++} ++ ++static inline ++uint64_t iova_level_offset(uint64_t iova, int inputsize, ++ int level, int gsz) ++{ ++ return ((iova & MAKE_64BIT_MASK(0, inputsize)) >> level_shift(level, gsz)) & ++ MAKE_64BIT_MASK(0, gsz - 3); ++} ++ ++/* VMSAv8-64 Translation Table Format Descriptor Decoding */ ++ ++/** ++ * get_page_pte_address - returns the L3 descriptor output address, ++ * ie. the page frame ++ * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format ++ */ ++static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz) ++{ ++ return PTE_ADDRESS(pte, granule_sz); ++} ++ ++/** ++ * get_table_pte_address - return table descriptor output address, ++ * ie. 
address of next level table ++ * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats ++ */ ++static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz) ++{ ++ return PTE_ADDRESS(pte, granule_sz); ++} ++ ++/** ++ * get_block_pte_address - return block descriptor output address and block size ++ * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats ++ */ ++static inline hwaddr get_block_pte_address(uint64_t pte, int level, ++ int granule_sz, uint64_t *bsz) ++{ ++ int n = level_shift(level, granule_sz); ++ ++ *bsz = 1ULL << n; ++ return PTE_ADDRESS(pte, n); ++} ++ ++#endif +\ No newline at end of file +diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h +index 8ae33c3753..4cc036dbdc 100644 +--- a/include/hw/arm/smmu-common.h ++++ b/include/hw/arm/smmu-common.h +@@ -24,21 +24,12 @@ + #include "qom/object.h" + #include "sysemu/iommufd.h" + #include ++#include "hw/arm/mmu-translate-common.h" + + #define SMMU_PCI_BUS_MAX 256 + #define SMMU_PCI_DEVFN_MAX 256 + #define SMMU_PCI_DEVFN(sid) (sid & 0xFF) + +-/* VMSAv8-64 Translation constants and functions */ +-#define VMSA_LEVELS 4 +-#define VMSA_MAX_S2_CONCAT 16 +- +-#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1) +-#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \ +- (VMSA_LEVELS - (lvl))) +-#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \ +- VMSA_BIT_LVL(isz, strd, lvl)) - 1) +- + /* + * Page table walk error types + */ +-- +2.33.0 + diff --git a/ub-fix-ummu_dev_set_iommu_dev-s-return.patch b/ub-fix-ummu_dev_set_iommu_dev-s-return.patch new file mode 100644 index 0000000000000000000000000000000000000000..1ee3b7692b37b281079e11c304b4fdb0c6871383 --- /dev/null +++ b/ub-fix-ummu_dev_set_iommu_dev-s-return.patch @@ -0,0 +1,42 @@ +From 4b4f1d578925f35b708fd9241f5d72cc63e43b6a Mon Sep 17 00:00:00 2001 +From: xiangzixuan +Date: Mon, 10 Nov 2025 19:19:49 +0800 +Subject: [PATCH 2/4] ub: fix ummu_dev_set_iommu_dev's return + +ummu_dev_set_iommu_dev has wrong return when everything is normal, correct it. 
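+
+To make the two calling conventions explicit (names simplified, illustration
+only, not part of this patch): the callback reports success as true, while the
+int-returning wrapper reports success as 0, so the callback's result has to be
+inverted when it is forwarded:
+
+    #include <stdbool.h>
+
+    static bool set_iommu_device_cb(void)       /* like ummu_dev_set_iommu_dev */
+    {
+        return true;                            /* true means success */
+    }
+
+    static int set_iommu_device_wrapper(void)   /* like ub_device_set_iommu_device */
+    {
+        return !set_iommu_device_cb();          /* 0 means success */
+    }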
+ +Signed-off-by: xiangzixuan +--- + hw/ub/ub.c | 2 +- + hw/ub/ub_ummu.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 66e34ebb34..0043f2189f 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -1147,7 +1147,7 @@ int ub_device_set_iommu_device(UBDevice *dev, HostIOMMUDevice *hoid, Error **err + UBBus *bus = ub_get_bus(dev); + + if (bus->iommu_ops && bus->iommu_ops->set_iommu_device) { +- return bus->iommu_ops->set_iommu_device(bus, bus->iommu_opaque, dev->eid, hoid, errp); ++ return !bus->iommu_ops->set_iommu_device(bus, bus->iommu_opaque, dev->eid, hoid, errp); + } + + return 0; +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index a7a4a33af3..5eff0c1f6e 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -1580,7 +1580,7 @@ static bool ummu_dev_set_iommu_dev(UBBus *bus, void *opaque, uint32_t eid, + ummu_dev->viommu = u->viommu; + QLIST_INSERT_HEAD(&u->viommu->device_list, ummu_dev, next); + +- return 0; ++ return true; + } + + static void ummu_dev_unset_iommu_dev(UBBus *bus, void *opaque, uint32_t eid) +-- +2.33.0 + diff --git a/ub-fix-use-gpa-instead-of-hva-to-R-W-msgq-data-probl.patch b/ub-fix-use-gpa-instead-of-hva-to-R-W-msgq-data-probl.patch new file mode 100644 index 0000000000000000000000000000000000000000..593a26dd16af323ef103b13d8d53b99d155b8836 --- /dev/null +++ b/ub-fix-use-gpa-instead-of-hva-to-R-W-msgq-data-probl.patch @@ -0,0 +1,413 @@ +From 32cf4afb32440b099fa94abcde5a851cd9768f75 Mon Sep 17 00:00:00 2001 +From: xiangzixuan +Date: Fri, 21 Nov 2025 17:03:49 +0800 +Subject: [PATCH 4/4] ub: fix use gpa instead of hva to R/W msgq data problem + +fix use gpa instead of hva to R/W msgq data problem + +Signed-off-by: xiangzixuan +--- + hw/ub/hisi/trace-events | 6 ++-- + hw/ub/hisi/ubc_msgq.c | 55 +++++++----------------------------- + hw/ub/ub.c | 9 ------ + hw/ub/ub_cna_mgmt.c | 56 ++++++++++++++++++++++++++++++++----- + hw/ub/ub_enum.c | 35 +++++++++++++++++++---- + include/hw/ub/hisi/ubc.h | 3 -- + include/hw/ub/ub_cna_mgmt.h | 3 +- + 7 files changed, 94 insertions(+), 73 deletions(-) + +diff --git a/hw/ub/hisi/trace-events b/hw/ub/hisi/trace-events +index afe421dd93..44839d5091 100644 +--- a/hw/ub/hisi/trace-events ++++ b/hw/ub/hisi/trace-events +@@ -2,6 +2,6 @@ + + # ubc_msgq.c + handle_eu_table_cfg_cmd(uint32_t msg_code, uint32_t entry_num, uint32_t tbl_cfg_mode, uint32_t tbl_cfg_status, uint32_t entry_start_id, uint32_t eid, uint32_t upi) "eu_msg_code(%u), cfg_entry_num(%u), tbl_cfg_mode(%u), tbl_cfg_status(%u), entry_start_id(%u), eid(%u), upi(%u)" +-msgq_sq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "sq_base_addr_gpa 0x%lx sq_base_addr_hva 0x%lx depth %u" +-msgq_cq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "cq_base_addr_gpa 0x%lx cq_base_addr_hva 0x%lx depth %u" +-msgq_rq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "rq_base_addr_gpa 0x%lx rq_base_addr_hva 0x%lx depth %u" ++msgq_sq_init(uint64_t gpa, uint32_t depth) "sq_base_addr_gpa 0x%lx depth %u" ++msgq_cq_init(uint64_t gpa, uint32_t depth) "cq_base_addr_gpa 0x%lx depth %u" ++msgq_rq_init(uint64_t gpa, uint32_t depth) "rq_base_addr_gpa 0x%lx depth %u" +diff --git a/hw/ub/hisi/ubc_msgq.c b/hw/ub/hisi/ubc_msgq.c +index 18403fbf4a..c393957d78 100644 +--- a/hw/ub/hisi/ubc_msgq.c ++++ b/hw/ub/hisi/ubc_msgq.c +@@ -91,7 +91,7 @@ static void handle_task_type_msg(BusControllerState *s, HiMsgSqe *sqe) + payload = g_malloc0(sizeof(MsgPktHeader)); + if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, + payload, 
sizeof(MsgPktHeader), MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); + g_free(payload); + return; + } +@@ -100,7 +100,7 @@ static void handle_task_type_msg(BusControllerState *s, HiMsgSqe *sqe) + payload = g_malloc0(sizeof(MsgPktHeader) + plen); + if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, + payload, sizeof(MsgPktHeader) + plen, MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); + g_free(payload); + return; + } +@@ -115,10 +115,8 @@ static void handle_task_type_msg(BusControllerState *s, HiMsgSqe *sqe) + + static void handle_task_type_enum(BusControllerState *s, HiMsgSqe *sqe) + { +- EnumPktHeader *payload = NULL; +- EnumPldScanHeader *scan_header = NULL; ++ void *payload = NULL; + uint32_t p_addr = sqe->p_addr; +- uint32_t header_size; + + if (p_addr + HI_MSG_SQE_PLD_SIZE > s->msgq.sq_sz) { + qemu_log("invalid p_addr %u, total size %ld\n", +@@ -126,26 +124,8 @@ static void handle_task_type_enum(BusControllerState *s, HiMsgSqe *sqe) + return; + } + +- scan_header = g_malloc0(sizeof(EnumPldScanHeader)); +- if (dma_memory_read(&address_space_memory, +- s->msgq.sq_base_addr_gpa + p_addr + ENUM_PKT_HEADER_SIZE, +- scan_header, sizeof(EnumPldScanHeader), MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); +- g_free(scan_header); +- return; +- } +- header_size = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_NA_CFG_REQ_SIZE; +- g_free(scan_header); +- payload = g_malloc0(header_size); +- if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, +- payload, header_size, MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); +- g_free(payload); +- return; +- } +- ++ payload = (void *)s->msgq.sq_base_addr_gpa + p_addr; + handle_msg_enum(s, sqe, payload); +- g_free(payload); + } + + static void handle_eu_table_cfg_cmd(BusControllerState *s, HiMsgSqe *sqe, void *payload) +@@ -200,7 +180,7 @@ static void handle_task_type_hisi_private(BusControllerState *s, HiMsgSqe *sqe) + payload = g_malloc0(sizeof(HiEuCfgReq)); + if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, + payload, sizeof(HiEuCfgReq), MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); + g_free(payload); + return; + } +@@ -236,9 +216,9 @@ void msgq_process_task(void *opaque, uint64_t val) + sqe = g_malloc0(sizeof(HiMsgSqe)); + cnt = (pi + depth - ci) % depth; + for (i = 0; i < cnt; i++) { +- if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + ci, ++ if (dma_memory_read(&address_space_memory, (unsigned long)((HiMsgSqe *)s->msgq.sq_base_addr_gpa + ci), + sqe, sizeof(HiMsgSqe), MEMTXATTRS_UNSPECIFIED)) { +- qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); + g_free(sqe); + return; + } +@@ -278,14 +258,13 @@ void msgq_sq_init(void *opaque) + uint64_t size = (uint64_t)depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE); + + s->msgq.sq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); +- s->msgq.sq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.sq_base_addr_gpa, &size, true); + if (size != depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE)) { + qemu_log("sq size %lu != %lu, depth=%u\n", size, + depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE), depth); 
+ return; + } + s->msgq.sq_sz = size; +- trace_msgq_sq_init(s->msgq.sq_base_addr_gpa, s->msgq.sq_base_addr_hva, depth); ++ trace_msgq_sq_init(s->msgq.sq_base_addr_gpa, depth); + } + + void msgq_cq_init(void *opaque) +@@ -297,14 +276,13 @@ void msgq_cq_init(void *opaque) + uint64_t size = (uint64_t)depth * HI_MSG_CQE_SIZE; + + s->msgq.cq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); +- s->msgq.cq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.cq_base_addr_gpa, &size, true); + if (size != depth * HI_MSG_CQE_SIZE) { + qemu_log("cq size %lu != %lu, depth=%u\n", size, + depth * HI_MSG_CQE_SIZE, depth); + return; + } + s->msgq.cq_sz = size; +- trace_msgq_cq_init(s->msgq.cq_base_addr_gpa, s->msgq.cq_base_addr_hva, depth); ++ trace_msgq_cq_init(s->msgq.cq_base_addr_gpa, depth); + } + + void msgq_rq_init(void *opaque) +@@ -316,14 +294,13 @@ void msgq_rq_init(void *opaque) + uint64_t size = (uint64_t)depth * HI_MSG_RQE_SIZE; + + s->msgq.rq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); +- s->msgq.rq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.rq_base_addr_gpa, &size, true); + if (size != depth * HI_MSG_RQE_SIZE) { + qemu_log("rq size %lu != %u, depth=%u\n", size, + depth * HI_MSG_RQE_SIZE, depth); + return; + } + s->msgq.rq_sz = size; +- trace_msgq_rq_init(s->msgq.rq_base_addr_gpa, s->msgq.rq_base_addr_hva, depth); ++ trace_msgq_rq_init(s->msgq.rq_base_addr_gpa, depth); + } + + void msgq_handle_rst(void *opaque) +@@ -346,17 +323,5 @@ void msgq_handle_rst(void *opaque) + ub_set_long(s->msgq_reg + RQ_ADDR_H, 0); + ub_set_long(s->msgq_reg + RQ_DEPTH, 0); + +- if (s->msgq.rq_sz && s->msgq.rq_base_addr_hva) { +- cpu_physical_memory_unmap((void *)s->msgq.rq_base_addr_hva, +- s->msgq.rq_sz, true, s->msgq.rq_sz); +- } +- if (s->msgq.sq_sz && s->msgq.sq_base_addr_hva) { +- cpu_physical_memory_unmap((void *)s->msgq.sq_base_addr_hva, +- s->msgq.sq_sz, true, s->msgq.sq_sz); +- } +- if (s->msgq.cq_sz && s->msgq.cq_base_addr_hva) { +- cpu_physical_memory_unmap((void *)s->msgq.cq_base_addr_hva, +- s->msgq.cq_sz, true, s->msgq.cq_sz); +- } + memset(&s->msgq, 0, sizeof(s->msgq)); + } +\ No newline at end of file +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 0043f2189f..4bd970b560 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -1604,15 +1604,6 @@ static void ub_dev_get_ubc_info(Monitor *mon, UBDevice *udev) + monitor_printf(mon, "│%-24s│0x%-43lx│\n", "msgq_reg", (uint64_t)ubcs->msgq_reg); + monitor_printf(mon, "│%-24s│%-45s│\n", "MR msgq_reg_mem name", ubcs->msgq_reg_mem.name); + monitor_printf(mon, "│%-24s│%-45s│\n", "MR io_mmio name", ubcs->io_mmio.name); +- monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", +- "hi_msgq_info sq addr", ubcs->msgq.sq_base_addr_gpa, +- ubcs->msgq.sq_base_addr_hva); +- monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", +- "hi_msgq_info cq addr", ubcs->msgq.cq_base_addr_gpa, +- ubcs->msgq.cq_base_addr_hva); +- monitor_printf(mon, "│%-24s│gpa 0x%-10lx hva 0x%-22lx│\n", +- "hi_msgq_info rq addr", ubcs->msgq.rq_base_addr_gpa, +- ubcs->msgq.rq_base_addr_hva); + return; + } + +diff --git a/hw/ub/ub_cna_mgmt.c b/hw/ub/ub_cna_mgmt.c +index 4339e20e20..1f74cbdb65 100644 +--- a/hw/ub/ub_cna_mgmt.c ++++ b/hw/ub/ub_cna_mgmt.c +@@ -21,6 +21,8 @@ + #include "hw/ub/ub_config.h" + #include "qemu/log.h" + #include "trace.h" ++#include "sysemu/dma.h" ++#include "hw/ub/ub_cna_mgmt.h" + + static void enum_set_cna_config_space(uint8_t opcode, EnumCnaCfgReq *cna_cfg_req) + { +@@ -60,8 +62,9 @@ void handle_enum_cna_config_request(BusControllerState *s, + 
HiMsgSqe *sqe, void *buf) + { + /* req message */ ++ void *payload; + size_t header_sz; +- EnumPktHeader *header = (EnumPktHeader *)buf; ++ EnumPktHeader *header; + EnumPldScanHeader *scan_header; + EnumCnaCfgReq *cna_cfg_req; + /* rsp message */ +@@ -74,10 +77,29 @@ void handle_enum_cna_config_request(BusControllerState *s, + HiMsgCqe cqe; + char guid[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; + +- scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ scan_header = g_malloc0(sizeof(EnumPldScanHeader)); ++ if (dma_memory_read(&address_space_memory, ++ (unsigned long)(buf + ENUM_PKT_HEADER_SIZE), ++ scan_header, sizeof(EnumPldScanHeader), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(scan_header); ++ return; ++ } ++ header_sz = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_NA_CFG_REQ_SIZE; ++ g_free(scan_header); ++ payload = g_malloc0(header_sz); ++ if (dma_memory_read(&address_space_memory, (unsigned long)(buf), ++ payload, header_sz, MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ header = (EnumPktHeader *)payload; ++ scan_header = (EnumPldScanHeader *)((uint8_t *)payload + ENUM_PKT_HEADER_SIZE); + header_sz = ENUM_PKT_HEADER_SIZE + +- calc_enum_pld_header_size(scan_header, true); +- cna_cfg_req = (EnumCnaCfgReq *)((uint8_t *)buf + header_sz); ++ calc_enum_pld_header_size(scan_header, true); ++ cna_cfg_req = (EnumCnaCfgReq *)((uint8_t *)payload + header_sz); + if (header->ulh.cfg != UB_CLAN_LINK_CFG || + header->cnth.nth_nlp != NTH_NLP_WITHOUT_TPH || + header->upi != UB_CP_UPI || +@@ -129,8 +151,9 @@ void handle_enum_cna_query_request(BusControllerState *s, + HiMsgSqe *sqe, void *buf) + { + /* req message */ ++ void *payload; + size_t header_sz; +- EnumPktHeader *header = (EnumPktHeader *)buf; ++ EnumPktHeader *header; + EnumPldScanHeader *scan_header; + EnumCnaQueryReq *cna_query_req; + /* rsp message */ +@@ -146,10 +169,29 @@ void handle_enum_cna_query_request(BusControllerState *s, + uint64_t emulated_offset; + size_t forward_path_size; + +- scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ scan_header = g_malloc0(sizeof(EnumPldScanHeader)); ++ if (dma_memory_read(&address_space_memory, ++ (unsigned long)(buf + ENUM_PKT_HEADER_SIZE), ++ scan_header, sizeof(EnumPldScanHeader), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(scan_header); ++ return; ++ } ++ header_sz = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_NA_QRY_REQ_SIZE; ++ g_free(scan_header); ++ payload = g_malloc0(header_sz); ++ if (dma_memory_read(&address_space_memory, (unsigned long)(buf), ++ payload, header_sz, MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ header = (EnumPktHeader *)payload; ++ scan_header = (EnumPldScanHeader *)((uint8_t *)payload + ENUM_PKT_HEADER_SIZE); + header_sz = ENUM_PKT_HEADER_SIZE + + calc_enum_pld_header_size(scan_header, true); +- cna_query_req = (EnumCnaQueryReq *)((uint8_t *)buf + header_sz); ++ cna_query_req = (EnumCnaQueryReq *)((uint8_t *)payload + header_sz); + if (header->ulh.cfg != UB_CLAN_LINK_CFG || + header->cnth.nth_nlp != NTH_NLP_WITHOUT_TPH || + header->upi != UB_CP_UPI || +diff --git a/hw/ub/ub_enum.c b/hw/ub/ub_enum.c +index e514f1732f..8e145e8f2d 100644 +--- a/hw/ub/ub_enum.c ++++ b/hw/ub/ub_enum.c +@@ -26,6 +26,8 @@ + #include 
"qemu/log.h" + #include "qapi/error.h" + #include "trace.h" ++#include "sysemu/dma.h" ++#include "hw/ub/ub_cna_mgmt.h" + + static void enum_get_port_info_from_config_space(UBDevice *dev, uint16_t port_idx, + EnumTlvPortInfo *port_info) +@@ -156,10 +158,11 @@ static void handle_enum_query_request(BusControllerState *s, HiMsgSqe *sqe, + void *buf) + { + /* req message */ ++ void *payload; + size_t header_sz; +- EnumPktHeader *header = (EnumPktHeader *)buf; +- struct ClanNetworkHeader *cnth = &header->cnth; +- struct UbLinkHeader *ulh = &header->ulh; ++ EnumPktHeader *header; ++ struct ClanNetworkHeader *cnth; ++ struct UbLinkHeader *ulh; + char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; + EnumPldScanHeader *scan_header; + EnumTopoQueryReq *scan_pdu; +@@ -175,6 +178,28 @@ static void handle_enum_query_request(BusControllerState *s, HiMsgSqe *sqe, + EnumTopoQueryRspPdu *rsp_pdu; + HiMsgCqe cqe; + ++ scan_header = g_malloc0(sizeof(EnumPldScanHeader)); ++ if (dma_memory_read(&address_space_memory, ++ (unsigned long)(buf + ENUM_PKT_HEADER_SIZE), ++ scan_header, sizeof(EnumPldScanHeader), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(scan_header); ++ return; ++ } ++ header_sz = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_TOPO_QUERY_REQ_SIZE; ++ g_free(scan_header); ++ payload = g_malloc0(header_sz); ++ if (dma_memory_read(&address_space_memory, (unsigned long)(buf), ++ payload, header_sz, MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Failed to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ header = (EnumPktHeader *)payload; ++ cnth = &header->cnth; ++ ulh = &header->ulh; ++ + if (ulh->cfg != UB_CLAN_LINK_CFG || cnth->nth_nlp != NTH_NLP_WITHOUT_TPH || + header->upi != UB_CP_UPI) { + qemu_log("invalid enum pkt header, please check the driver inside guestos:" +@@ -182,9 +207,9 @@ static void handle_enum_query_request(BusControllerState *s, HiMsgSqe *sqe, + return; + } + +- scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ scan_header = (EnumPldScanHeader *)((uint8_t *)payload + ENUM_PKT_HEADER_SIZE); + header_sz = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true); +- scan_pdu = (EnumTopoQueryReq *)((uint8_t *)buf + header_sz); ++ scan_pdu = (EnumTopoQueryReq *)((uint8_t *)payload + header_sz); + scan_pdu_com = (EnumPldScanPduCommon *)scan_pdu; + ub_device_get_str_from_guid(&scan_pdu_com->guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); + dev = ub_find_device_by_guid(&scan_pdu_com->guid); +diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h +index dc923f3a13..c94759956e 100644 +--- a/include/hw/ub/hisi/ubc.h ++++ b/include/hw/ub/hisi/ubc.h +@@ -288,13 +288,10 @@ typedef struct HiMsgSqePld { + + typedef struct HiMsgqInfo { + uint64_t sq_base_addr_gpa; +- uint64_t sq_base_addr_hva; + uint64_t sq_sz; + uint64_t cq_base_addr_gpa; +- uint64_t cq_base_addr_hva; + uint64_t cq_sz; + uint64_t rq_base_addr_gpa; +- uint64_t rq_base_addr_hva; + uint64_t rq_sz; + } HiMsgqInfo; + +diff --git a/include/hw/ub/ub_cna_mgmt.h b/include/hw/ub/ub_cna_mgmt.h +index f317216af3..046d708582 100644 +--- a/include/hw/ub/ub_cna_mgmt.h ++++ b/include/hw/ub/ub_cna_mgmt.h +@@ -29,6 +29,7 @@ typedef struct EnumCnaQueryReq { + uint32_t port_idx : 16; + uint32_t rsv : 16; + } EnumCnaQueryReq; ++#define ENUM_NA_QRY_REQ_SIZE 28 + + typedef struct EnumCnaQueryRsp { + /* DW0~DW5 */ +@@ -56,7 +57,7 @@ typedef struct EnumCnaCfgReq { + uint32_t cna : 24; + uint8_t rsvd1; + } 
EnumCnaCfgReq; +-#define ENUM_NA_CFG_REQ_SIZE 44 ++#define ENUM_NA_CFG_REQ_SIZE 32 + + typedef struct EnumNaCfgRsp { + /* DW0~DW5 */ +-- +2.33.0 + diff --git a/ub-init-ub-bus-controller-dev-config-space.patch b/ub-init-ub-bus-controller-dev-config-space.patch new file mode 100644 index 0000000000000000000000000000000000000000..f851f9223322a3affeeeba8f0fc0ed4f3ca61d06 --- /dev/null +++ b/ub-init-ub-bus-controller-dev-config-space.patch @@ -0,0 +1,235 @@ +From 7f97fabe53c964a9f522e0708d518dafcc6a0747 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 18:43:51 +0800 +Subject: [PATCH 4/5] ub: init ub bus controller dev config space +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1、init default config value for bus controller dev config space +2、init default wmask value for bus controller dev config space + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_ubc.c | 169 +++++++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_ubc.h | 9 +++ + 2 files changed, 178 insertions(+) + +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index e371f4f35a..1023ec9deb 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -24,7 +24,11 @@ + #include "hw/qdev-properties.h" + #include "hw/qdev-properties-system.h" + #include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_config.h" ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/hisi/ub_mem.h" + #include "migration/vmstate.h" + + static uint64_t ub_msgq_reg_read(void *opaque, hwaddr addr, unsigned len) +@@ -205,6 +209,170 @@ static const TypeInfo ub_bus_controller_type_info = { + .class_init = ub_bus_controller_class_init, + }; + ++static void ub_bus_controller_cfg0_route_table_init(UBDevice *ub_dev) ++{ ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_ROUTE_TABLE_START, true); ++ UbRouteTable *route_table = (UbRouteTable *)(ub_dev->config + emulated_offset); ++ ++ /* The prerequisite is that each device uses only one port. ++ * The Ub controller own a CNA, each port own a CNA, and each device own a CNA. 
*/ ++ route_table->entry_num = UB_DEV_MAX_NUM_OF_PORT * 2 + 1; ++ route_table->ers = 1; /* support exact route */ ++} ++ ++static void ub_bus_controller_space_cfg0_init(UBDevice *ub_dev) ++{ ++ UbCfg0Basic *cfg0_basic; ++ Cfg0SupportFeature *support_feature; ++ UbCfg0ShpCap *shp_cap; ++ UbSlotInfo *slot_info; ++ uint64_t emulated_offset; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ cfg0_basic = (UbCfg0Basic *)(ub_dev->config + emulated_offset); ++ cfg0_basic->header.slice_version = UB_SLICE_VERSION; ++ cfg0_basic->header.slice_used_size = UB_CFG0_BASIC_SLICE_USED_SIZE; ++ cfg0_basic->total_num_of_port = ub_dev->port.port_num & UINT16_MASK; ++ cfg0_basic->total_num_of_ue = 1; ++ cfg0_basic->cap_bitmap[CFG0_CAP2_SHP_INDEX / BITS_PER_BYTE] = ++ 1 << (CFG0_CAP2_SHP_INDEX % BITS_PER_BYTE); ++ support_feature = &cfg0_basic->support_feature; ++ support_feature->bits.entity_available = 1; ++ support_feature->bits.mtu_supported = 1; ++ support_feature->bits.route_table_supported = SUPPORTED; ++ support_feature->bits.upi_supported = SUPPORTED; ++ support_feature->bits.broker_supported = NOT_SUPPORTED; ++ support_feature->bits.switch_supported = SUPPORTED; ++ support_feature->bits.cc_supported = NOT_SUPPORTED; ++ /* SHP CAP */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_CAP2_SHP_START, true); ++ shp_cap = (UbCfg0ShpCap *)(ub_dev->config + emulated_offset); ++ shp_cap->slot_num = 1; ++ shp_cap->header.slice_version = UB_SLICE_VERSION; ++ shp_cap->header.slice_used_size = (shp_cap->slot_num * sizeof(UbSlotInfo) + sizeof(UbCfg0ShpCap)) / DWORD_SIZE; ++ for (int i = 0; i < shp_cap->slot_num; ++i) { ++ slot_info = (UbSlotInfo *)((uint8_t *)shp_cap->slot_info + i * sizeof(UbSlotInfo)); ++ slot_info->start_port_idx = 0; ++ slot_info->end_port_idx = cfg0_basic->total_num_of_port - 1; ++ slot_info->pp_ctrl = 1; ++ slot_info->ms_ctrl = 1; ++ slot_info->pd_ctrl = 1; ++ slot_info->pds_ctrl = 1; ++ } ++ ub_bus_controller_cfg0_route_table_init(ub_dev); ++} ++ ++static void ub_bus_controller_space_cfg1_init(UBDevice *ub_dev) ++{ ++ UbCfg1Basic *cfg1_basic; ++ Cfg1SupportFeature *support_feature; ++ UbCfg1DecoderCap *dec_cap; ++ uint64_t emulated_offset; ++ ++ /* basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(ub_dev->config + emulated_offset); ++ cfg1_basic->header.slice_version = UB_SLICE_VERSION; ++ cfg1_basic->header.slice_used_size = UB_CFG1_BASIC_SLICE_USED_SIZE; ++ ++ cfg1_basic->cap_bitmap[CFG1_DECODER_CAP_INDEX / BITS_PER_BYTE] |= ++ 1 << (CFG1_DECODER_CAP_INDEX % BITS_PER_BYTE); ++ ++ support_feature = &cfg1_basic->support_feature; ++ support_feature->bits.mgs = SUPPORTED; ++ support_feature->bits.ubbas = NOT_SUPPORTED; ++ support_feature->bits.ers0s = SUPPORTED; ++ support_feature->bits.ers1s = NOT_SUPPORTED; ++ support_feature->bits.ers2s = SUPPORTED; ++ support_feature->bits.cdmas = SUPPORTED; ++ support_feature->bits.matt_juris = UB_DRIVE; ++ cfg1_basic->ers_space_size[0] = UBC_ERS0_SPACE_SIZE; ++ cfg1_basic->ers_space_size[1] = UBC_ERS1_SPACE_SIZE; ++ cfg1_basic->ers_space_size[2] = UBC_ERS2_SPACE_SIZE; ++ cfg1_basic->ers_start_addr[0] = UBC_ERS0_SPACE_ADDR; ++ cfg1_basic->ers_start_addr[1] = UBC_ERS1_SPACE_ADDR; ++ cfg1_basic->ers_start_addr[2] = UBC_ERS2_SPACE_ADDR; ++ cfg1_basic->eid_upi_ten = UBC_EID_UPI_TEN_DEFAULT_VAL; ++ cfg1_basic->class_code = UBC_CLASS_CODE; ++ /* decoder cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP1_DECODER, 
true); ++ dec_cap = (UbCfg1DecoderCap *)(ub_dev->config + emulated_offset); ++ dec_cap->header.slice_version = UB_SLICE_VERSION; ++ dec_cap->header.slice_used_size = sizeof(UbCfg1DecoderCap) / DWORD_SIZE; ++ dec_cap->decoder.event_size_sup = DECODER_CAP_EVENT_SIZE; ++ dec_cap->decoder.cmd_size_sup = DECODER_CAP_CMD_SIZE; ++ dec_cap->decoder.mmio_size_sup = DECODER_CAP_MMIO_SIZE; ++} ++ ++static void ub_bus_controller_wmask_init(UBDevice *ub_dev) ++{ ++ UbCfg1DecoderCap *dec_cap_mask; ++ UbCfg0ShpCap *cfg0_shp_wmask, *cfg0_shp; ++ uint64_t emulated_offset; ++ ++ /* cfg0 cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_CAP2_SHP_START, true); ++ cfg0_shp_wmask = (UbCfg0ShpCap *)(ub_dev->wmask + emulated_offset); ++ cfg0_shp = (UbCfg0ShpCap *)(ub_dev->config + emulated_offset); ++ memset(cfg0_shp_wmask, 0, UB_SLICE_SZ); ++ for (int i = 0; i < cfg0_shp->slot_num; ++i) { ++ cfg0_shp_wmask->slot_info[i].pp_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].wl_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].pl_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].ms_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].pd_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].pds_ctrl = ~0; ++ cfg0_shp_wmask->slot_info[i].pw_ctrl = ~0; ++ } ++ ++ /* cfg1 cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP1_DECODER, true); ++ dec_cap_mask = (UbCfg1DecoderCap *)(ub_dev->wmask + emulated_offset); ++ memset(dec_cap_mask, 0, sizeof(UbCfg1DecoderCap)); ++ dec_cap_mask->decoder_ctrl.decoder_en = ~0; ++ memset(&dec_cap_mask->dec_matt_ba, 0xff, sizeof(dec_cap_mask->dec_matt_ba)); ++ memset(&dec_cap_mask->dec_mmio_ba, 0xff, sizeof(dec_cap_mask->dec_mmio_ba)); ++ memset(&dec_cap_mask->dev_usi_idx, 0xff, sizeof(dec_cap_mask->dev_usi_idx)); ++ dec_cap_mask->decoder_cmdq_cfg.cmdq_en = ~0; ++ dec_cap_mask->decoder_cmdq_cfg.cmdq_size_use = ~0; ++ dec_cap_mask->decoder_cmdq_prod.cmdq_wr_idx = ~0; ++ dec_cap_mask->decoder_cmdq_prod.cmdq_err_resp = ~0; ++ dec_cap_mask->decoder_cmdq_cons.cmdq_rd_idx = ~0; ++ dec_cap_mask->decoder_cmdq_cons.cmdq_err = ~0; ++ dec_cap_mask->decoder_cmdq_cons.cmdq_err_res = ~0; ++ dec_cap_mask->decoder_cmdq_ba.cmdq_ba = ~0; ++ dec_cap_mask->decoder_evtq_cfg.evtq_en = ~0; ++ dec_cap_mask->decoder_evtq_cfg.evtq_size_use = ~0; ++ dec_cap_mask->decoder_evtq_prod.evtq_wr_idx = ~0; ++ dec_cap_mask->decoder_evtq_cons.evtq_rd_idx = ~0; ++ dec_cap_mask->decoder_evtq_ba.evtq_ba = ~0; ++} ++ ++static void ub_bus_controller_w1cmask_init(UBDevice *ub_dev) ++{ ++ UbCfg0ShpCap *cfg0_shp_w1cmask, *cfg0_shp; ++ uint64_t emulated_offset; ++ ++ /* cfg0 cap */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_CAP2_SHP_START, true); ++ cfg0_shp_w1cmask = (UbCfg0ShpCap *)(ub_dev->w1cmask + emulated_offset); ++ cfg0_shp = (UbCfg0ShpCap *)(ub_dev->config + emulated_offset); ++ memset(cfg0_shp_w1cmask, 0, UB_SLICE_SZ); ++ for (int i = 0; i < cfg0_shp->slot_num; ++i) { ++ cfg0_shp_w1cmask->slot_info[i].pp_st = ~0; ++ cfg0_shp_w1cmask->slot_info[i].pd_st = ~0; ++ cfg0_shp_w1cmask->slot_info[i].pdsc_st = ~0; ++ } ++} ++ ++static void ub_bus_controller_dev_config_space_init(UBDevice *dev) ++{ ++ ub_bus_controller_space_cfg0_init(dev); ++ ub_bus_controller_space_cfg1_init(dev); ++ ub_bus_controller_wmask_init(dev); ++ ub_bus_controller_w1cmask_init(dev); ++} ++ + static bool ub_ubc_is_empty(UBBus *bus) + { + UBDevice *dev; +@@ -240,6 +408,7 @@ static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp) + } + + dev->dev_type = UB_TYPE_IBUS_CONTROLLER; ++ ub_bus_controller_dev_config_space_init(dev); + 
} + + static Property ub_bus_controller_dev_properties[] = { +diff --git a/include/hw/ub/ub_ubc.h b/include/hw/ub/ub_ubc.h +index af0f4b1a7f..fe86a1e34f 100644 +--- a/include/hw/ub/ub_ubc.h ++++ b/include/hw/ub/ub_ubc.h +@@ -61,6 +61,15 @@ struct BusControllerClass { + SysBusDeviceClass parent_class; + }; + ++#define UBC_ERS0_SPACE_SIZE 0x2 ++#define UBC_ERS1_SPACE_SIZE 0x10001 ++#define UBC_ERS2_SPACE_SIZE 0x20 ++#define UBC_ERS0_SPACE_ADDR 0x2c00000000 ++#define UBC_ERS1_SPACE_ADDR 0x2d00000000 ++#define UBC_ERS2_SPACE_ADDR 0x2e00000000 ++#define UBC_EID_UPI_TEN_DEFAULT_VAL 1024 ++#define UBC_CLASS_CODE 0x0 ++ + void ub_save_ubc_list(BusControllerState *s); + BusControllerState *container_of_ubbus(UBBus *bus); + #endif +-- +2.33.0 + diff --git a/ub-init-usi-operator-function.patch b/ub-init-usi-operator-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..09be83ac22bff27ef9a34c3435de26d72111d8af --- /dev/null +++ b/ub-init-usi-operator-function.patch @@ -0,0 +1,656 @@ +From 69c969333282e8c2962273ff6124a166d2d37f06 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 10:13:09 +0800 +Subject: [PATCH 5/5] ub: init usi operator function + +add some usi operator function, this will be used later in vfio ub +device realized. + +Signed-off-by: caojinhuahw +--- + hw/intc/arm_gicv3_its_kvm.c | 2 +- + hw/ub/ub_usi.c | 513 +++++++++++++++++++++++++ + include/exec/memattrs.h | 2 +- + include/hw/intc/arm_gicv3_its_common.h | 2 +- + include/hw/ub/ub.h | 19 + + include/hw/ub/ub_usi.h | 19 + + 6 files changed, 554 insertions(+), 3 deletions(-) + +diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c +index f7df602cff..5f79802804 100644 +--- a/hw/intc/arm_gicv3_its_kvm.c ++++ b/hw/intc/arm_gicv3_its_kvm.c +@@ -41,7 +41,7 @@ struct KVMARMITSClass { + }; + + +-static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid) ++static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint32_t devid) + { + struct kvm_msi msi; + +diff --git a/hw/ub/ub_usi.c b/hw/ub/ub_usi.c +index 8250d853eb..0e3dae9d6e 100644 +--- a/hw/ub/ub_usi.c ++++ b/hw/ub/ub_usi.c +@@ -21,6 +21,451 @@ + #include "qemu/log.h" + #include "exec/address-spaces.h" + ++static void usi_init_vector_notifiers(UBDevice *udev, ++ USIVectorUseNotifier use_notifier, ++ USIVectorReleaseNotifier release_notifier, ++ USIVectorPollNotifier poll_notifier) ++{ ++ udev->usi_vector_use_notifier = use_notifier; ++ udev->usi_vector_release_notifier = release_notifier; ++ udev->usi_vector_poll_notifier = poll_notifier; ++} ++ ++static int usi_set_notifier_for_vector(UBDevice *udev, uint16_t vector) ++{ ++ USIMessage msg; ++ ++ if (usi_is_masked(udev, vector)) { ++ return 0; ++ } ++ ++ msg = usi_get_message(udev, vector); ++ return udev->usi_vector_use_notifier(udev, vector, msg); ++} ++ ++static void usi_unset_notifier_for_vector(UBDevice *udev, uint16_t vector) ++{ ++ if (usi_is_masked(udev, vector)) { ++ return; ++ } ++ udev->usi_vector_release_notifier(udev, vector); ++} ++ ++void usi_unset_vector_notifiers(UBDevice *udev) ++{ ++ int vector; ++ ++ for (vector = 0; vector < udev->usi_entries_nr; vector++) { ++ usi_unset_notifier_for_vector(udev, vector); ++ } ++ ++ udev->usi_vector_use_notifier = NULL; ++ udev->usi_vector_release_notifier = NULL; ++ udev->usi_vector_poll_notifier = NULL; ++} ++ ++int usi_set_vector_notifiers(UBDevice *udev, ++ USIVectorUseNotifier use_notifier, ++ USIVectorReleaseNotifier release_notifier, ++ USIVectorPollNotifier poll_notifier) ++{ ++ int 
vector, ret; ++ ++ usi_init_vector_notifiers(udev, use_notifier, release_notifier, poll_notifier); ++ for (vector = 0; vector < udev->usi_entries_nr; vector++) { ++ ret = usi_set_notifier_for_vector(udev, vector); ++ if (ret < 0) { ++ goto undo; ++ } ++ } ++ ++ qemu_log("usi set notifier for vector success.\n"); ++ return 0; ++ ++undo: ++ qemu_log("usi set notifier for vector failed.\n"); ++ while (--vector >= 0) { ++ usi_unset_notifier_for_vector(udev, vector); ++ } ++ udev->usi_vector_use_notifier = NULL; ++ udev->usi_vector_release_notifier = NULL; ++ return ret; ++} ++ ++int usi_enabled(UBDevice *udev) ++{ ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2_ENABLE_OFFSET, true); ++ uint32_t *mask = (uint32_t *)(udev->config + emulated_offset); ++ ++ return (*mask) & UB_CFG1_CAP4_INT_TYPE2_ENABLEBIT; ++} ++ ++USIMessage usi_get_message(UBDevice *udev, uint16_t vector) ++{ ++ USIMessage msg; ++ uint16_t addr_index; ++ uint8_t *vec_table_entry = NULL; ++ uint8_t *addr_table_entry = NULL; ++ uint8_t *valid_byte = NULL; ++ uint8_t valid_bit; ++ ++ vec_table_entry = udev->usi_vec_table + vector * USI_VEC_TABLE_ENTRY_SIZE; ++ msg.data = ub_get_long(vec_table_entry); ++ addr_index = ub_get_word(vec_table_entry + USI_VEC_TABLE_ADDR_INDEX_OFFSET); ++ if (addr_index >= udev->usi_addr_table_nr) { ++ qemu_log("address index exceed, the index is %u, total table num is %u\n", ++ addr_index, udev->usi_addr_table_nr); ++ addr_index = udev->usi_addr_table_nr - 1; ++ } ++ ++ addr_table_entry = udev->usi_addr_table + addr_index * USI_ADDR_TABLE_ENTRY_SIZE; ++ /* check addr table entry is valid */ ++ msg.address = ub_get_quad(addr_table_entry); ++ ++ valid_byte = addr_table_entry + USI_ADDR_TABLE_VALID_BIT_OFFSET; ++ valid_bit = ub_get_byte(valid_byte); ++ valid_bit = valid_bit & USI_ADDR_TABLE_VALID_BIT_MASK; ++ if (valid_bit == 0) { ++ qemu_log("invalid interrupt address table, the index is %u\n", addr_index); ++ } ++ ++ return msg; ++} ++ ++static uint8_t usi_pending_mask(uint16_t vector) ++{ ++ return 1 << (vector % 8); ++} ++ ++static uint8_t *usi_pending_byte(UBDevice *udev, uint16_t vector) ++{ ++ return udev->usi_pend_table + vector / 8; ++} ++ ++int usi_is_pending(UBDevice *udev, uint16_t vector) ++{ ++ return *usi_pending_byte(udev, vector) & usi_pending_mask(vector); ++} ++ ++void usi_set_pending(UBDevice *udev, uint16_t vector) ++{ ++ *usi_pending_byte(udev, vector) |= usi_pending_mask(vector); ++} ++ ++void usi_clr_pending(UBDevice *udev, uint16_t vector) ++{ ++ *usi_pending_byte(udev, vector) &= ~usi_pending_mask(vector); ++} ++ ++static void usi_fire_vector_notifier(UBDevice *udev, uint16_t vector, bool is_masked) ++{ ++ USIMessage msg; ++ ++ if (!udev->usi_vector_use_notifier) { ++ qemu_log("usi_vector_use_notifier not init, do nothing.\n"); ++ return; ++ } ++ ++ if (is_masked) { ++ qemu_log("udev(%s %s) vector(%u) masked.\n", ++ udev->name, udev->qdev.id, vector); ++ udev->usi_vector_release_notifier(udev, vector); ++ return; ++ } ++ ++ msg = usi_get_message(udev, vector); ++ udev->usi_vector_use_notifier(udev, vector, msg); ++} ++ ++static void usi_handle_mask_update(UBDevice *udev, uint16_t vector, bool was_masked) ++{ ++ bool is_masked = usi_is_masked(udev, vector); ++ ++ if (is_masked == was_masked) { ++ qemu_log("vector(%u) is_masked and was_masked equal, val is %d, " ++ "update do nothing.\n", vector, is_masked); ++ return; ++ } ++ ++ if (usi_ue_is_masked(udev)) { ++ qemu_log("function entity is masked, vector(%u) mask update do nothing.\n", vector); 
++        return;
++    }
++
++    usi_fire_vector_notifier(udev, vector, is_masked);
++
++    if (!is_masked && usi_is_pending(udev, vector)) {
++        usi_clr_pending(udev, vector);
++        usi_notify(udev, vector);
++    }
++}
++
++static uint64_t usi_vec_table_mmio_read(void *opaque, hwaddr addr, unsigned size)
++{
++    UBDevice *udev = opaque;
++    uint64_t val = UINT64_MAX;
++
++    switch (size) {
++    case BYTE_SIZE:
++        val = ub_get_byte(udev->usi_vec_table + addr);
++        break;
++    case WORD_SIZE:
++        val = ub_get_word(udev->usi_vec_table + addr);
++        break;
++    case DWORD_SIZE:
++        val = ub_get_long(udev->usi_vec_table + addr);
++        break;
++    default:
++        qemu_log("unexpected usi vec table read size %u.\n", size);
++        break;
++    }
++
++    qemu_log("vec table read: addr(0x%lx), size(%u) value(0x%lx).\n", addr, size, val);
++    return val;
++}
++
++static void usi_vec_table_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
++{
++    UBDevice *udev = opaque;
++    uint16_t vector = addr / USI_VEC_TABLE_ENTRY_SIZE;
++    bool was_masked;
++
++    was_masked = usi_is_masked(udev, vector);
++    switch (size) {
++    case BYTE_SIZE:
++        ub_set_byte(udev->usi_vec_table + addr, val);
++        break;
++    case WORD_SIZE:
++        ub_set_word(udev->usi_vec_table + addr, val);
++        break;
++    case DWORD_SIZE:
++        ub_set_long(udev->usi_vec_table + addr, val);
++        break;
++    default:
++        qemu_log("unexpected usi vec table write size %u.\n", size);
++        break;
++    }
++
++    qemu_log("vec table update: addr(0x%lx), size(%u), val(0x%lx).\n", addr, size, val);
++    usi_handle_mask_update(udev, vector, was_masked);
++}
++
++static const MemoryRegionOps usi_vec_table_mmio_ops = {
++    .read = usi_vec_table_mmio_read,
++    .write = usi_vec_table_mmio_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .valid = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++    .impl = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++};
++
++static uint64_t usi_addr_table_mmio_read(void *opaque, hwaddr addr, unsigned size)
++{
++    UBDevice *udev = opaque;
++    uint64_t val = UINT64_MAX;
++
++    switch (size) {
++    case BYTE_SIZE:
++        val = ub_get_byte(udev->usi_addr_table + addr);
++        break;
++    case WORD_SIZE:
++        val = ub_get_word(udev->usi_addr_table + addr);
++        break;
++    case DWORD_SIZE:
++        val = ub_get_long(udev->usi_addr_table + addr);
++        break;
++    default:
++        qemu_log("unexpected usi addr table read size %u.\n", size);
++        break;
++    }
++
++    qemu_log("addr table read: addr(0x%lx), size(%u) value(0x%lx).\n", addr, size, val);
++    return val;
++}
++
++static void usi_addr_table_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
++{
++    UBDevice *udev = opaque;
++
++    switch (size) {
++    case BYTE_SIZE:
++        ub_set_byte(udev->usi_addr_table + addr, val);
++        break;
++    case WORD_SIZE:
++        ub_set_word(udev->usi_addr_table + addr, val);
++        break;
++    case DWORD_SIZE:
++        ub_set_long(udev->usi_addr_table + addr, val);
++        break;
++    default:
++        qemu_log("unexpected usi addr table write size %u.\n", size);
++        break;
++    }
++
++    qemu_log("usi addr table update: addr(0x%lx), size(%u), val(0x%lx).\n",
++             addr, size, val);
++}
++
++static const MemoryRegionOps usi_addr_table_mmio_ops = {
++    .read = usi_addr_table_mmio_read,
++    .write = usi_addr_table_mmio_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .valid = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++    .impl = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    }
++};
++
++static uint64_t usi_pend_table_mmio_read(void *opaque, hwaddr addr, unsigned size)
++{
++    UBDevice *udev = opaque;
++    uint64_t val = UINT64_MAX;
++
++    switch (size) {
++    
case BYTE_SIZE: ++ val = ub_get_byte(udev->usi_pend_table + addr); ++ break; ++ case WORD_SIZE: ++ val = ub_get_word(udev->usi_pend_table + addr); ++ break; ++ case DWORD_SIZE: ++ val = ub_get_long(udev->usi_pend_table + addr); ++ break; ++ default: ++ qemu_log("expect usi pend addr table read size %u.\n", size); ++ break; ++ } ++ ++ qemu_log("pend table read: addr(0x%lx), size(%u) value(0x%lx).\n", addr, size, val); ++ ++ return val; ++} ++ ++static void usi_pend_table_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) ++{ ++ /* do nothing now */ ++} ++ ++static const MemoryRegionOps usi_pend_table_mmio_ops = { ++ .read = usi_pend_table_mmio_read, ++ .write = usi_pend_table_mmio_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++ .impl = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ } ++}; ++ ++bool usi_is_masked(UBDevice *udev, uint16_t vector) ++{ ++ uint32_t offset = (uint32_t)vector * USI_VEC_TABLE_ENTRY_SIZE + USI_VEC_TABLE_MASK_OFFSET; ++ ++ return udev->usi_vec_table[offset] & USI_VEC_TABLE_MASKBIT; ++} ++ ++static void usi_mask_all(UBDevice *udev, uint16_t entries) ++{ ++ uint16_t vector; ++ uint32_t offset; ++ bool was_masked; ++ ++ for (vector = 0; vector < entries; vector++) { ++ offset = (uint32_t)vector * USI_VEC_TABLE_ENTRY_SIZE + USI_VEC_TABLE_MASK_OFFSET; ++ was_masked = usi_is_masked(udev, vector); ++ udev->usi_vec_table[offset] |= USI_VEC_TABLE_MASKBIT; ++ usi_handle_mask_update(udev, vector, was_masked); ++ } ++} ++ ++static void usi_set_disable(UBDevice *udev) ++{ ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2_ENABLE_OFFSET, true); ++ uint32_t *val = (uint32_t *)(udev->config + emulated_offset); ++ memset(val, 0, sizeof(uint32_t)); ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2_MASK_OFFSET, true); ++ val = (uint32_t *)(udev->config + emulated_offset); ++ memset(val, 0, sizeof(uint32_t)); ++ (*val) |= UB_CFG1_CAP4_INT_TYPE2_MASKBIT; ++ qemu_log("ub device(%s %s) disable usi\n", udev->name, udev->qdev.id); ++} ++ ++static void usi_clear_all_vectors(UBDevice *dev) ++{ ++ int vector; ++ ++ for (vector = 0; vector < dev->usi_entries_nr; ++vector) { ++ usi_clr_pending(dev, vector); ++ } ++} ++ ++void usi_reset(UBDevice *udev) ++{ ++ uint32_t pend_table_size = DIV_ROUND_UP(udev->usi_entries_nr, USI_PEND_TABLE_ENTRY_BIT_NUM) * ++ USI_PEND_TABLE_ENTRY_SIZE; ++ usi_clear_all_vectors(udev); ++ memset(udev->usi_vec_table, 0, udev->usi_entries_nr * USI_VEC_TABLE_ENTRY_SIZE); ++ memset(udev->usi_addr_table, 0, udev->usi_addr_table_nr * USI_ADDR_TABLE_ENTRY_SIZE); ++ memset(udev->usi_pend_table, 0, pend_table_size); ++ ++ usi_mask_all(udev, udev->usi_entries_nr); ++ usi_set_disable(udev); ++} ++ ++void usi_init(UBDevice *udev, uint16_t vec_table_num, uint16_t addr_table_num, ++ uint64_t vec_table_start_addr, uint64_t addr_table_start_addr, ++ uint64_t pend_table_start_addr, MemoryRegion *fer0_mr) ++{ ++ uint32_t vec_table_size, addr_table_size, pend_table_size; ++ ++ vec_table_size = (uint32_t)vec_table_num * USI_VEC_TABLE_ENTRY_SIZE; ++ addr_table_size = (uint32_t)addr_table_num * USI_ADDR_TABLE_ENTRY_SIZE; ++ pend_table_size = DIV_ROUND_UP(vec_table_num, USI_PEND_TABLE_ENTRY_BIT_NUM) * ++ USI_PEND_TABLE_ENTRY_SIZE; ++ ++ udev->usi_entries_nr = vec_table_num; ++ udev->usi_addr_table_nr = addr_table_num; ++ udev->usi_vec_table = g_malloc0(vec_table_size); ++ udev->usi_addr_table = g_malloc0(addr_table_size); ++ 
udev->usi_pend_table = g_malloc0(pend_table_size); ++ ++ usi_mask_all(udev, vec_table_num); ++ usi_set_disable(udev); ++ ++ memory_region_init_io(&udev->usi_vec_table_mmio, OBJECT(udev), &usi_vec_table_mmio_ops, ++ udev, "usi-vec-table", vec_table_size); ++ memory_region_add_subregion(fer0_mr, vec_table_start_addr, &udev->usi_vec_table_mmio); ++ memory_region_init_io(&udev->usi_addr_table_mmio, OBJECT(udev), &usi_addr_table_mmio_ops, ++ udev, "usi-addr-table", addr_table_size); ++ memory_region_add_subregion(fer0_mr, addr_table_start_addr, &udev->usi_addr_table_mmio); ++ memory_region_init_io(&udev->usi_pend_table_mmio, OBJECT(udev), &usi_pend_table_mmio_ops, ++ udev, "usi-pend-table", pend_table_size); ++ memory_region_add_subregion(fer0_mr, pend_table_start_addr, &udev->usi_pend_table_mmio); ++} ++ ++void usi_uninit(UBDevice *udev, MemoryRegion *fer0_mr) ++{ ++ g_free(udev->usi_vec_table); ++ memory_region_del_subregion(fer0_mr, &udev->usi_vec_table_mmio); ++ g_free(udev->usi_addr_table); ++ memory_region_del_subregion(fer0_mr, &udev->usi_addr_table_mmio); ++ g_free(udev->usi_pend_table); ++ memory_region_del_subregion(fer0_mr, &udev->usi_pend_table_mmio); ++} ++ + void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev) + { + MemTxAttrs attrs = {}; +@@ -36,3 +481,71 @@ void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev) + qemu_log("usi notify success: interrupt_id %u eventid %u gicv3_its 0x%lx\n", + interrupt_id, msg->data, msg->address); + } ++ ++void usi_notify(UBDevice *udev, uint16_t vector) ++{ ++ USIMessage msg; ++ ++ /* check vector is valid later */ ++ ++ if (usi_is_masked(udev, vector) || usi_ue_is_masked(udev)) { ++ usi_set_pending(udev, vector); ++ return; ++ } ++ ++ msg = usi_get_message(udev, vector); ++ usi_send_message(&msg, ub_interrupt_id(udev), udev); ++} ++ ++int usi_ue_is_masked(UBDevice *udev) ++{ ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2_MASK_OFFSET, true); ++ uint32_t *mask = (uint32_t *)(udev->config + emulated_offset); ++ ++ return (*mask) & UB_CFG1_CAP4_INT_TYPE2_MASKBIT; ++} ++ ++static void usi_ue_each_vector_update(UBDevice *udev, uint16_t vector, bool ue_is_masked) ++{ ++ USIMessage msg; ++ ++ if (usi_is_masked(udev, vector)) { ++ qemu_log("vector(%u) is masked, do nothing.\n", vector); ++ return; ++ } ++ ++ if (!udev->usi_vector_use_notifier) { ++ qemu_log("usi_vector_use_notifier not init, do nothing.\n"); ++ return; ++ } ++ ++ if (ue_is_masked) { ++ udev->usi_vector_release_notifier(udev, vector); ++ return; ++ } ++ ++ msg = usi_get_message(udev, vector); ++ udev->usi_vector_use_notifier(udev, vector, msg); ++ ++ if (usi_is_pending(udev, vector)) { ++ qemu_log("start udev(%s) vector(%u) pending interrupt notify.\n", udev->name, vector); ++ usi_clr_pending(udev, vector); ++ usi_notify(udev, vector); ++ } ++} ++ ++void usi_handle_ue_mask_update(UBDevice *udev, bool was_masked) ++{ ++ bool is_masked = usi_ue_is_masked(udev); ++ uint16_t vector; ++ ++ if (is_masked == was_masked) { ++ qemu_log("ue is_masked and was_masked equal, val is %d, " ++ "update do nothing.\n", is_masked); ++ return; ++ } ++ ++ for (vector = 0; vector < udev->usi_entries_nr; vector++) { ++ usi_ue_each_vector_update(udev, vector, is_masked); ++ } ++} +\ No newline at end of file +diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h +index d04170aa27..6dded7df43 100644 +--- a/include/exec/memattrs.h ++++ b/include/exec/memattrs.h +@@ -51,7 +51,7 @@ typedef struct MemTxAttrs { + */ + 
unsigned int memory:1; + /* Requester ID (for MSI for example) */ +- unsigned int requester_id:16; ++ unsigned int requester_id; + /* Invert endianness for this page */ + unsigned int byte_swap:1; + /* +diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h +index 7dc712b38d..e072c36cca 100644 +--- a/include/hw/intc/arm_gicv3_its_common.h ++++ b/include/hw/intc/arm_gicv3_its_common.h +@@ -117,7 +117,7 @@ struct GICv3ITSCommonClass { + SysBusDeviceClass parent_class; + /*< public >*/ + +- int (*send_msi)(GICv3ITSState *s, uint32_t data, uint16_t devid); ++ int (*send_msi)(GICv3ITSState *s, uint32_t data, uint32_t devid); + void (*pre_save)(GICv3ITSState *s); + void (*post_load)(GICv3ITSState *s); + }; +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 800be61451..9a5d4c6c33 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -142,6 +142,9 @@ typedef void UBConfigReadFunc(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask); + typedef void UBConfigWriteFunc(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask); ++typedef int (*USIVectorUseNotifier)(UBDevice *udev, uint16_t vector, USIMessage msg); ++typedef void (*USIVectorReleaseNotifier)(UBDevice *udev, uint16_t vector); ++typedef void (*USIVectorPollNotifier)(UBDevice *dev, uint16_t vector_start, uint16_t vector_end); + + struct UBDevice { + DeviceState qdev; +@@ -165,6 +168,22 @@ struct UBDevice { + UBConfigWriteFunc *config_write; + int (* bus_instance_verify)(UBDevice *dev, Error **errp); + ++ /* usi entries */ ++ uint16_t usi_entries_nr; ++ uint16_t usi_addr_table_nr; ++ /* Space to store usi vec table & addr table & pending bit array */ ++ uint8_t *usi_vec_table; ++ uint8_t *usi_addr_table; ++ uint8_t *usi_pend_table; ++ /* MemoryRegion container for usi vec table & addr table & pending bit array */ ++ MemoryRegion usi_vec_table_mmio; ++ MemoryRegion usi_addr_table_mmio; ++ MemoryRegion usi_pend_table_mmio; ++ /* USI notifiers */ ++ USIVectorUseNotifier usi_vector_use_notifier; ++ USIVectorReleaseNotifier usi_vector_release_notifier; ++ USIVectorPollNotifier usi_vector_poll_notifier; ++ + QLIST_ENTRY(UBDevice) node; + }; + +diff --git a/include/hw/ub/ub_usi.h b/include/hw/ub/ub_usi.h +index 96332e5850..5fcefe8f7f 100644 +--- a/include/hw/ub/ub_usi.h ++++ b/include/hw/ub/ub_usi.h +@@ -24,6 +24,25 @@ struct USIMessage { + uint32_t data; + }; + ++void usi_init(UBDevice *udev, uint16_t vec_table_num, uint16_t addr_table_num, ++ uint64_t vec_table_start_addr, uint64_t addr_table_start_addr, ++ uint64_t pend_table_start_addr, MemoryRegion *fer0_mr); ++void usi_uninit(UBDevice *udev, MemoryRegion *fer0_mr); ++bool usi_is_masked(UBDevice *udev, uint16_t vector); ++USIMessage usi_get_message(UBDevice *udev, uint16_t vector); ++int usi_enabled(UBDevice *udev); ++int usi_set_vector_notifiers(UBDevice *udev, ++ USIVectorUseNotifier use_notifier, ++ USIVectorReleaseNotifier release_notifier, ++ USIVectorPollNotifier poll_notifier); ++void usi_unset_vector_notifiers(UBDevice *udev); ++void usi_notify(UBDevice *udev, uint16_t vector); ++int usi_is_pending(UBDevice *udev, uint16_t vector); ++void usi_set_pending(UBDevice *udev, uint16_t vector); ++void usi_clr_pending(UBDevice *udev, uint16_t vector); ++int usi_ue_is_masked(UBDevice *udev); ++void usi_handle_ue_mask_update(UBDevice *udev, bool was_masked); + void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev); ++void usi_reset(UBDevice *dev); + + #endif +-- +2.33.0 + diff --git 
a/ub-just-default-build-on-aarch64-machine.patch b/ub-just-default-build-on-aarch64-machine.patch
new file mode 100644
index 0000000000000000000000000000000000000000..34a7e190997efbb47d7e040cee8ae71d2b006352
--- /dev/null
+++ b/ub-just-default-build-on-aarch64-machine.patch
@@ -0,0 +1,28 @@
+From c1ea8c0e5a21686a2f3158d3afb8cc3b5f443481 Mon Sep 17 00:00:00 2001
+From: caojinhuahw
+Date: Thu, 20 Nov 2025 11:04:53 +0800
+Subject: [PATCH 2/2] ub: just default build on aarch64 machine
+
+The ub feature currently supports only aarch64, so don't build it
+on other platforms.
+
+Signed-off-by: caojinhuahw
+---
+ meson.build | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/meson.build b/meson.build
+index b03869810f..458d8981cc 100644
+--- a/meson.build
++++ b/meson.build
+@@ -583,6 +583,7 @@ config_host_data.set('CONFIG_HUGEPAGE_POD', have_hugepage_pod)
+ # ub
+ have_ub = get_option('ub') \
+     .require(targetos == 'linux', error_message: 'UB is supported only on Linux') \
++    .require(cpu == 'aarch64', error_message: 'UB is supported only on aarch64') \
+     .allowed()
+ 
+ if cpu in ['aarch64']
+-- 
+2.33.0
+
diff --git a/ub-port-setup-ub-port-info.patch b/ub-port-setup-ub-port-info.patch
new file mode 100644
index 0000000000000000000000000000000000000000..686c3f3766225182048c91f041e5b43f5ad4fb15
--- /dev/null
+++ b/ub-port-setup-ub-port-info.patch
@@ -0,0 +1,379 @@
+From 9b685111a67661e5bf5fb2866d7d8b376cd072ea Mon Sep 17 00:00:00 2001
+From: caojinhuahw
+Date: Tue, 11 Nov 2025 17:36:55 +0800
+Subject: [PATCH 3/5] ub port: setup ub port info
+
+Configure ub device port info after machine creation is done.
+
+Signed-off-by: caojinhuahw
+---
+ hw/arm/virt.c           |   6 +-
+ hw/ub/ub.c              | 276 ++++++++++++++++++++++++++++++++++++++++
+ include/hw/arm/virt.h   |   1 -
+ include/hw/ub/ub.h      |   4 +-
+ include/qemu/typedefs.h |   1 +
+ 5 files changed, 285 insertions(+), 3 deletions(-)
+
+diff --git a/hw/arm/virt.c b/hw/arm/virt.c
+index 470a320bc6..de914a9136 100644
+--- a/hw/arm/virt.c
++++ b/hw/arm/virt.c
+@@ -2070,7 +2070,11 @@ void virt_machine_done(Notifier *notifier, void *data)
+     }
+ 
+     fw_cfg_add_extra_pci_roots(vms->bus, vms->fw_cfg);
+-
++#ifdef CONFIG_UB
++    if (ub_dev_finally_setup(vms, &error_fatal) < 0) {
++        exit(1);
++    }
++#endif // CONFIG_UB
+     virt_acpi_setup(vms);
+     virt_build_smbios(vms);
+ }
+diff --git a/hw/ub/ub.c b/hw/ub/ub.c
+index b6503c62e2..2b797dcf60 100644
+--- a/hw/ub/ub.c
++++ b/hw/ub/ub.c
+@@ -27,6 +27,7 @@
+ #include "hw/ub/ub_config.h"
+ #include "hw/ub/ub_bus.h"
+ #include "hw/ub/ub_ubc.h"
++#include "hw/ub/ub_acpi.h"
+ #include "qemu/log.h"
+ #include "qapi/error.h"
+ #include "hw/ub/ub_bus.h"
+@@ -515,6 +516,24 @@ BusControllerState *container_of_ubbus(UBBus *bus)
+     return NULL;
+ }
+ 
++UBDevice *ub_find_device_by_id(const char *id)
++{
++    BusControllerState *ubc = NULL;
++    UBDevice *dev = NULL;
++
++    QLIST_FOREACH(ubc, &ub_bus_controllers, node) {
++        if (!ubc->bus->qbus.num_children) {
++            continue;
++        }
++        QLIST_FOREACH(dev, &ubc->bus->devices, node) {
++            if (dev && !strcmp(id, dev->qdev.id)) {
++                return dev;
++            }
++        }
++    }
++    return NULL;
++}
++
+ UBDevice *ub_find_device_by_guid(UbGuid *guid)
+ {
+     BusControllerState *ubc = NULL;
+@@ -532,3 +551,260 @@ UBDevice *ub_find_device_by_guid(UbGuid *guid)
+     }
+     return NULL;
+ }
++
++// #pragma GCC push_options
++// #pragma GCC optimize ("O0")
++static void ub_config_set_port_basic(NeighborInfo *info, UBDevice *dev)
++{
++    uint32_t port_idx = info->local_port_idx;
++    uint64_t emulated_offset;
++    ConfigPortBasic *port_basic = NULL;
++    ConfigPortBasic 
*port_basic_wmask = NULL; ++ ConfigPortBasic *port_basic_w1cmask = NULL; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_PORT_SLICE_START + port_idx * UB_PORT_SZ, true); ++ port_basic = (ConfigPortBasic *)(dev->config + emulated_offset); ++ port_basic_wmask = (ConfigPortBasic *)(dev->wmask + emulated_offset); ++ port_basic_w1cmask = (ConfigPortBasic *)(dev->w1cmask + emulated_offset); ++ memset(port_basic, 0, sizeof(ConfigPortBasic)); ++ memset(port_basic_wmask, 0, sizeof(ConfigPortBasic)); ++ memset(port_basic_w1cmask, 0, sizeof(ConfigPortBasic)); ++ /* slice header */ ++ port_basic->header.slice_version = UB_SLICE_VERSION; ++ port_basic->header.slice_used_size = UB_PORT_BASIC_SLICE_USED_SIZE; ++ /* port info */ ++ port_basic->port_info.port_type = 0; // physical port ++ port_basic->port_info.port_idx = port_idx & UINT16_MASK; ++ /* neighbor port info */ ++ port_basic->neighbor_port_info.neighbor_port_idx = info->neighbor_port_idx & UINT16_MASK; ++ port_basic->neighbor_port_info.neighbot_port_guid = info->neighbor_dev->guid; ++ port_basic->port_reset = 0; ++ ++ /* set wmask */ ++ port_basic_wmask->port_cna = ~0; ++ port_basic_wmask->port_reset = ~0; ++} ++// #pragma GCC pop_options ++ ++static int ub_dev_set_neighbor_dev_neighbor_info(uint32_t local_port_idx, ++ uint32_t neighbor_port_idx, UBDevice *local_dev, ++ UBDevice *neighbor_dev, Error **errp) ++{ ++ UbPortInfo *neighbor_port = &neighbor_dev->port; ++ ++ if (neighbor_port->port_num <= neighbor_port_idx) { ++ qemu_log("invalid neighbor port idx %u %u\n", ++ neighbor_port->port_num, neighbor_port_idx); ++ ++ error_setg(errp, "invalid neighbor port idx %u %u\n", ++ neighbor_port->port_num, neighbor_port_idx); ++ return -1; ++ } ++ ++ if (neighbor_port->neighbors[neighbor_port_idx].neighbor_dev) { ++ if (neighbor_port->neighbors[neighbor_port_idx].neighbor_dev != local_dev || ++ neighbor_port->neighbors[neighbor_port_idx].local_port_idx != neighbor_port_idx || ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_port_idx != local_port_idx) { ++ qemu_log("The neighbor information of the two devices does not match " ++ "each other. \nPlease check your command line parameter port info:\n" ++ "%s set (%s:%u = %s:%u) BUT neighbor %s already set (%s:%u = %s:%u)\n", ++ local_dev->qdev.id, local_dev->qdev.id, local_port_idx, ++ neighbor_dev->qdev.id, neighbor_port_idx, ++ neighbor_dev->qdev.id, neighbor_dev->qdev.id, neighbor_port_idx, ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_dev->qdev.id, ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_port_idx); ++ ++ error_setg(errp, "The neighbor information of the two devices does not match " ++ "each other. 
\nPlease check your command line parameter port info:\n" ++ "%s set (%s:%u = %s:%u) BUT neighbor %s already set (%s:%u = %s:%u)\n", ++ local_dev->qdev.id, local_dev->qdev.id, local_port_idx, ++ neighbor_dev->qdev.id, neighbor_port_idx, ++ neighbor_dev->qdev.id, neighbor_dev->qdev.id, neighbor_port_idx, ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_dev->qdev.id, ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_port_idx); ++ return -1; ++ } ++ } ++ neighbor_port->neighbors[neighbor_port_idx].local_port_idx = neighbor_port_idx; ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_port_idx = local_port_idx; ++ neighbor_port->neighbors[neighbor_port_idx].neighbor_dev = local_dev; ++ neighbor_port->port_info_exist = true; ++ ub_config_set_port_basic(&neighbor_port->neighbors[neighbor_port_idx], neighbor_dev); ++ return 0; ++} ++ ++static int ub_dev_set_neighbor_info(UBDevice *dev, Error **errp) ++{ ++ char *neighbor_info_str; ++ char neighbor_id[UB_DEV_ID_LEN] = {0}; ++ uint32_t local_port_idx; ++ uint32_t neighbor_port_idx; ++ UBDevice *neighbor_dev; ++ ++ neighbor_info_str = strtok(dev->port.neighbors_cmd, "+"); ++ while (neighbor_info_str != NULL) { ++ int ret = sscanf(neighbor_info_str, "%u:%[^:]:%u", ++ &local_port_idx, neighbor_id, &neighbor_port_idx); ++ neighbor_info_str = strtok(NULL, "+"); ++ if (ret < 3) { ++ qemu_log("port info format is incorrect %s\n", neighbor_info_str); ++ error_setg(errp, "port info format is incorrect %s\n", neighbor_info_str); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ if (local_port_idx >= dev->port.port_num) { ++ qemu_log("%s local port info is illegal, port idx:%u port num %u\n", ++ dev->qdev.id, local_port_idx, dev->port.port_num); ++ error_setg(errp, "%s local port info is illegal, port idx:%u port num %u\n", ++ dev->qdev.id, local_port_idx, dev->port.port_num); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ ++ neighbor_dev = ub_find_device_by_id(neighbor_id); ++ if (neighbor_dev == NULL) { ++ qemu_log("%s:%u neighbor_dev not exist %s\n", ++ dev->qdev.id, local_port_idx, neighbor_id); ++ error_setg(errp, "%s:%u neighbor_dev not exist %s\n", ++ dev->qdev.id, local_port_idx, neighbor_id); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ if (neighbor_dev == dev) { ++ qemu_log("%s can not connect to itself\n", dev->qdev.id); ++ error_setg(errp, "%s can not connect to itself\n", dev->qdev.id); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ if (neighbor_port_idx >= neighbor_dev->port.port_num) { ++ qemu_log("%s neighbor port info is illegal, port idx:%u port num %u\n", ++ dev->qdev.id, neighbor_port_idx, neighbor_dev->port.port_num); ++ error_setg(errp, "%s neighbor port info is illegal, port idx:%u port num %u\n", ++ dev->qdev.id, neighbor_port_idx, neighbor_dev->port.port_num); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ /* ub device can only connect with ub controller or ub switch */ ++ if ((dev->dev_type & UB_TYPE_DEVICE) && ++ !(neighbor_dev->dev_type & (UB_TYPE_SWITCH | UB_TYPE_ISWITCH | UB_TYPE_IBUS_CONTROLLER))) { ++ qemu_log("%s can not connect with %s, ub device can only connect with " ++ "ub controller or ub switch\n", dev->qdev.id, neighbor_dev->qdev.id); ++ error_setg(errp,"%s can not connect with %s ub device can only connect with " ++ "ub controller or ub switch\n", dev->qdev.id, 
neighbor_dev->qdev.id); ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ /* Check whether the neighbor information of the two ends matches. */ ++ if (dev->port.neighbors[local_port_idx].neighbor_dev) { ++ if (dev->port.neighbors[local_port_idx].neighbor_dev != neighbor_dev || ++ dev->port.neighbors[local_port_idx].local_port_idx != local_port_idx || ++ dev->port.neighbors[local_port_idx].neighbor_port_idx != neighbor_port_idx) { ++ qemu_log("The neighbor information of the two devices does not match " ++ "each other. \nPlease check your command line parameter port info:\n" ++ "%s set (%s:%u = %s:%u) BUT %s set (%s:%u = %s:%u)\n", ++ dev->qdev.id, dev->qdev.id, local_port_idx, ++ neighbor_dev->qdev.id, neighbor_port_idx, ++ dev->port.neighbors[local_port_idx].neighbor_dev->qdev.id, ++ dev->port.neighbors[local_port_idx].neighbor_dev->qdev.id, ++ dev->port.neighbors[local_port_idx].neighbor_port_idx, ++ dev->qdev.id, ++ dev->port.neighbors[local_port_idx].local_port_idx); ++ ++ error_setg(errp, "The neighbor information of the two devices does not match " ++ "each other. \nPlease check your command line parameter port info:\n" ++ "%s set (%s:%u = %s:%u) BUT %s set (%s:%u = %s:%u)\n", ++ dev->qdev.id, dev->qdev.id, local_port_idx, ++ neighbor_dev->qdev.id, neighbor_port_idx, ++ dev->port.neighbors[local_port_idx].neighbor_dev->qdev.id, ++ dev->port.neighbors[local_port_idx].neighbor_dev->qdev.id, ++ dev->port.neighbors[local_port_idx].neighbor_port_idx, ++ dev->qdev.id, ++ dev->port.neighbors[local_port_idx].local_port_idx); ++ ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ } ++ dev->port.neighbors[local_port_idx].local_port_idx = local_port_idx; ++ dev->port.neighbors[local_port_idx].neighbor_port_idx = neighbor_port_idx; ++ dev->port.neighbors[local_port_idx].neighbor_dev = neighbor_dev; ++ dev->port.port_info_exist = true; ++ /* set remote neighbor_dev */ ++ if (ub_dev_set_neighbor_dev_neighbor_info(local_port_idx, neighbor_port_idx, dev, ++ neighbor_dev, errp) < 0) { ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return -1; ++ } ++ ub_config_set_port_basic(&dev->port.neighbors[local_port_idx], dev); ++ } ++ g_free(dev->port.neighbors_cmd); ++ dev->port.neighbors_cmd = NULL; ++ return 0; ++} ++ ++static int ub_dev_init_port_info_by_cmd(Error **errp) ++{ ++ BusControllerState *ubc = NULL; ++ UBDevice *dev = NULL; ++ ++ QLIST_FOREACH(ubc, &ub_bus_controllers, node) { ++ if (!ubc->bus->qbus.num_children) { ++ continue; ++ } ++ ++ QLIST_FOREACH(dev, &ubc->bus->devices, node) { ++ if (dev && dev->port.neighbors_cmd) { ++ if (ub_dev_set_neighbor_info(dev, errp) < 0) { ++ return -1; ++ } ++ qemu_log("finish set_neighbor_info, eid:%u\n", dev->eid); ++ } ++ } ++ } ++ /* Check whether any device port info does not exist */ ++ QLIST_FOREACH(ubc, &ub_bus_controllers, node) { ++ if (!ubc->bus->qbus.num_children) { ++ continue; ++ } ++ QLIST_FOREACH(dev, &ubc->bus->devices, node) { ++ if (dev->dev_type != UB_TYPE_DEVICE && dev->dev_type != UB_TYPE_IDEVICE) { ++ continue; ++ } ++ ++ if (dev && !dev->port.port_info_exist) { ++ qemu_log("%s port info does not exist.\n", dev->qdev.id); ++ error_setg(errp, "%s port info does not exist.\n", dev->qdev.id); ++ return -1; ++ } ++ } ++ } ++ return 0; ++} ++ ++/* ++ * now all ub device add, finally setup for all ub device. ++ * 1. check ub device bus instance type ++ * 2. 
init the port info ++ * */ ++int ub_dev_finally_setup(VirtMachineState *vms, Error **errp) ++{ ++ /* ++ * Initialize the port information of all UB devices according ++ * to the input information after all UB devices are constructed. ++ */ ++ if (ub_dev_init_port_info_by_cmd(errp) < 0) { ++ return -1; ++ } ++ ++ ub_set_ubinfo_in_ubc_table(vms); ++ ++ return 0; ++} +\ No newline at end of file +diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h +index 7f0d3ed39d..29cc5fa0c4 100644 +--- a/include/hw/arm/virt.h ++++ b/include/hw/arm/virt.h +@@ -39,7 +39,6 @@ + #include "sysemu/kvm.h" + #include "hw/intc/arm_gicv3_common.h" + #include "qom/object.h" +-#include "hw/ub/ub_bus.h" + + #define NUM_GICV2M_SPIS 64 + #define NUM_VIRTIO_TRANSPORTS 32 +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 2f408d874d..858824220c 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -20,6 +20,7 @@ + #include + #include "qemu/typedefs.h" + #include "exec/memory.h" ++#include "hw/arm/virt.h" + + #define BYTE_SIZE 1 + #define WORD_SIZE 2 +@@ -226,5 +227,6 @@ void ub_default_read_config(UBDevice *dev, uint64_t offset, + void ub_default_write_config(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask); + UBDevice *ub_find_device_by_guid(UbGuid *guid); +- ++int ub_dev_finally_setup(VirtMachineState *vms, Error **errp); ++UBDevice *ub_find_device_by_id(const char *id); + #endif +diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h +index 18d0f21ee2..a1b15dd219 100644 +--- a/include/qemu/typedefs.h ++++ b/include/qemu/typedefs.h +@@ -140,6 +140,7 @@ typedef struct VMStateDescription VMStateDescription; + + /* UB typedef */ + typedef struct UBDevice UBDevice; ++typedef struct UBBus UBBus; + + /* + * Pointer types +-- +2.33.0 + diff --git a/ub-prepare-some-function-for-later-vfio-ub-realize.patch b/ub-prepare-some-function-for-later-vfio-ub-realize.patch new file mode 100644 index 0000000000000000000000000000000000000000..b776306bad6deceaaf164598c59e9c97cf9b155f --- /dev/null +++ b/ub-prepare-some-function-for-later-vfio-ub-realize.patch @@ -0,0 +1,196 @@ +From 960234ba49dcbebfe1fd92279f6743e58d4d6178 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 09:55:31 +0800 +Subject: [PATCH 4/5] ub: prepare some function for later vfio-ub realize + +add ers register and iommu operator help functions + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 129 +++++++++++++++++++++++++++++++++++++++++---- + include/hw/ub/ub.h | 7 +++ + 2 files changed, 126 insertions(+), 10 deletions(-) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index fbfedb6368..45a2c84968 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -691,16 +691,6 @@ BusControllerState *container_of_ubbus(UBBus *bus) + return NULL; + } + +-AddressSpace *ub_device_iommu_address_space(UBDevice *dev) +-{ +- UBBus *bus = ub_get_bus(dev); +- +- if (bus->iommu_ops && bus->iommu_ops->get_address_space) { +- return bus->iommu_ops->get_address_space(bus, bus->iommu_opaque, dev->eid); +- } +- return &address_space_memory; +-} +- + UBDevice *ub_find_device_by_id(const char *id) + { + BusControllerState *ubc = NULL; +@@ -974,6 +964,87 @@ static int ub_dev_init_port_info_by_cmd(Error **errp) + return 0; + } + ++bool ub_guid_initialized(UbGuid *guid) ++{ ++ if (!guid->vendor && !guid->type && !guid->version && ++ !guid->device_id && !guid->rsv && !guid->seq_num) { ++ return false; ++ } else { ++ return true; ++ } ++} ++ ++AddressSpace *ub_device_iommu_address_space(UBDevice *dev) ++{ ++ UBBus *bus = ub_get_bus(dev); ++ ++ if 
(bus->iommu_ops && bus->iommu_ops->get_address_space) { ++ return bus->iommu_ops->get_address_space(bus, bus->iommu_opaque, dev->eid); ++ } ++ return &address_space_memory; ++} ++ ++int ub_device_set_iommu_device(UBDevice *dev, HostIOMMUDevice *hoid, Error **errp) ++{ ++ UBBus *bus = ub_get_bus(dev); ++ ++ if (bus->iommu_ops && bus->iommu_ops->set_iommu_device) { ++ return bus->iommu_ops->set_iommu_device(bus, bus->iommu_opaque, dev->eid, hoid, errp); ++ } ++ ++ return 0; ++} ++ ++void ub_device_unset_iommu_device(UBDevice *dev) ++{ ++ UBBus *bus = ub_get_bus(dev); ++ ++ if (bus->iommu_ops && bus->iommu_ops->unset_iommu_device) { ++ bus->iommu_ops->unset_iommu_device(bus, bus->iommu_opaque, dev->eid); ++ } ++} ++ ++bool ub_device_check_ummu_is_nested(UBDevice *dev) ++{ ++ UBBus *bus = ub_get_bus(dev); ++ ++ if (bus->iommu_ops && bus->iommu_ops->ummu_is_nested) { ++ return bus->iommu_ops->ummu_is_nested(bus->iommu_opaque); ++ } ++ ++ return false; ++} ++ ++void ub_register_ers(UBDevice *dev, uint8_t region_num, MemoryRegion *memory) ++{ ++ UBIORegion *r; ++ UbCfg1Basic *cfg1_basic_wmask; ++ uint64_t size = memory_region_size(memory); ++ uint64_t emulated_offset; ++ uint64_t wmask; ++ ++ if (region_num >= UB_NUM_REGIONS) { ++ qemu_log("invalid region_num %u\n", region_num); ++ return; ++ } ++ if (!is_power_of_2(size)) { ++ qemu_log("region %u is_power_of_2 check failed! size 0x%"PRIx64"\n", ++ region_num, size); ++ return; ++ } ++ ++ r = &dev->io_regions[region_num]; ++ r->addr = UINT64_MAX; ++ r->size = size; ++ r->memory = memory; ++ r->address_space = ub_get_bus(dev)->address_space_mem; ++ wmask = ~(size - 1); ++ /* Mark that the ers is RW */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic_wmask = (UbCfg1Basic *)(dev->wmask + emulated_offset); ++ ub_set_quad((uint8_t *)&cfg1_basic_wmask->ers_ubba[region_num], wmask); ++} ++ + uint32_t ub_interrupt_id(UBDevice *udev) + { + uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true); +@@ -1045,3 +1116,41 @@ uint32_t ub_dev_get_ueid(UBDevice *udev) + uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_DEV_UEID_OFFSET, true); + return *(uint32_t *)(udev->config + offset); + } ++ ++enum UbDeviceType ub_dev_get_type(UBDevice *udev) ++{ ++ uint64_t offset; ++ UbCfg1Basic *cfg1; ++ int baseCode; ++ ++ if (udev == NULL) { ++ return UB_TYPE_UNINIT; ++ } ++ ++ offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1 = (UbCfg1Basic *)(udev->config + offset); ++ baseCode = cfg1->class_code & UB_GUID_BASE_CODE_MASK; ++ ++ switch (udev->guid.type) { ++ case UB_GUID_TYPE_BUS_INSTANCE: ++ return UB_TYPE_BUS_INSTANCE; ++ case UB_GUID_TYPE_BUS_CONTROLLER: ++ if (baseCode == UB_GUID_BASE_INSTANCE) { ++ return UB_TYPE_UNINIT; ++ } else { ++ return UB_TYPE_DEVICE; ++ } ++ case UB_GUID_TYPE_IBUS_CONTROLLER: ++ if (baseCode == UB_GUID_BASE_INSTANCE) { ++ return UB_TYPE_IBUS_CONTROLLER; ++ } else { ++ return UB_TYPE_IDEVICE; ++ } ++ case UB_GUID_TYPE_SWITCH: ++ return UB_TYPE_SWITCH; ++ case UB_GUID_TYPE_ISWITCH: ++ return UB_TYPE_ISWITCH; ++ default: ++ return UB_TYPE_UNINIT; ++ } ++} +\ No newline at end of file +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index ca2a54d845..800be61451 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -53,6 +53,7 @@ typedef struct __attribute__ ((__packed__)) UbGuid { + unsigned int device_id : 16; + unsigned int vendor : 16; + } UbGuid; ++bool ub_guid_initialized(UbGuid *guid); + #define 
UB_DEV_GUID_STRING_LENGTH 37 + void ub_device_get_str_from_guid(UbGuid *guid, char *guid_str, uint32_t str_len); + bool ub_device_get_guid_from_str(UbGuid *guid, char *guid_str); +@@ -253,9 +254,15 @@ static inline uint64_t ub_config_size(void) + return UB_DEV_CONFIG_SPACE_TOTAL_SIZE; + } + AddressSpace *ub_device_iommu_address_space(UBDevice *dev); ++int ub_device_set_iommu_device(UBDevice *dev, HostIOMMUDevice *hoid, Error **errp); ++void ub_device_unset_iommu_device(UBDevice *dev); ++bool ub_device_check_ummu_is_nested(UBDevice *dev); + UBDevice *ub_find_device_by_id(const char *id); ++void ub_register_ers(UBDevice *dev, uint8_t region_num, ++ MemoryRegion *memory); + uint32_t ub_interrupt_id(UBDevice *udev); + void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque); + uint32_t ub_dev_get_token_id(UBDevice *udev); + uint32_t ub_dev_get_ueid(UBDevice *udev); ++enum UbDeviceType ub_dev_get_type(UBDevice *udev); + #endif +-- +2.33.0 + diff --git a/ub-realize-base-ub-device-framework.patch b/ub-realize-base-ub-device-framework.patch new file mode 100644 index 0000000000000000000000000000000000000000..4107ff893fd1ed1f110b62d77352b244abc57011 --- /dev/null +++ b/ub-realize-base-ub-device-framework.patch @@ -0,0 +1,1025 @@ +From b6653818d7b0a2f3eccb6c9eb006adc9f9452a6f Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Mon, 10 Nov 2025 20:30:00 +0800 +Subject: [PATCH] ub: realize base ub device framework +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1、add UBDevice struct and some base header file define +2、support UBDeivce properties config + +Signed-off-by: caojinhuahw +--- + hw/core/qdev-properties-system.c | 214 +++++++++++++++++++++ + hw/ub/meson.build | 1 + + hw/ub/ub.c | 162 ++++++++++++++++ + include/hw/qdev-properties-system.h | 20 ++ + include/hw/ub/ub.h | 156 +++++++++++++++ + include/hw/ub/ub_bus.h | 39 ++++ + include/hw/ub/ub_common.h | 288 ++++++++++++++++++++++++++++ + include/qemu/typedefs.h | 3 + + linux-headers/linux/vfio.h | 15 ++ + 9 files changed, 898 insertions(+) + create mode 100644 hw/ub/ub.c + create mode 100644 include/hw/ub/ub_bus.h + create mode 100644 include/hw/ub/ub_common.h + +diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c +index 9cc2e38aba..4643ef1fbd 100644 +--- a/hw/core/qdev-properties-system.c ++++ b/hw/core/qdev-properties-system.c +@@ -23,6 +23,7 @@ + #include "qemu/cutils.h" + #include "qemu/units.h" + #include "qemu/uuid.h" ++#include "qemu/id.h" + #include "qemu/error-report.h" + #include "qdev-prop-internal.h" + +@@ -35,6 +36,10 @@ + #include "hw/pci/pcie.h" + #include "hw/i386/x86.h" + #include "util/block-helpers.h" ++#ifdef CONFIG_UB ++#include "hw/ub/ub.h" ++#include "qemu/log.h" ++#endif // CONFIG_UB + + static bool check_prop_still_unset(Object *obj, const char *name, + const void *old_val, const char *new_val, +@@ -1235,3 +1240,212 @@ const PropertyInfo qdev_prop_cpus390entitlement = { + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, + }; ++ ++#ifdef CONFIG_UB ++/* --- ub host address --- */ ++static void get_ub_host_devaddr(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ Property *prop = opaque; ++ UBHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); ++ char buffer[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ char *p = buffer; ++ ub_device_get_str_from_guid(&addr->guid, buffer, sizeof(buffer)); ++ ++ visit_type_str(v, name, &p, errp); ++} ++ ++static void 
set_ub_host_devaddr(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ Property *prop = opaque; ++ UBHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); ++ char *str; ++ if (!visit_type_str(v, name, &str, errp)) { ++ return; ++ } ++ ++ if (!ub_device_get_guid_from_str(&addr->guid, str)) { ++ qemu_log("host set failed, current: %s," ++ "example: %s\n", str, GUID_STR_EXAMPLE); ++ error_setg(errp, "host set failed, current: %s," ++ " example: %s\n", str, GUID_STR_EXAMPLE); ++ } ++ g_free(str); ++} ++ ++const PropertyInfo qdev_prop_ub_host_devaddr = { ++ .name = "str", ++ .description = "Address ub_guid(128bit) of the host device. example: "GUID_STR_EXAMPLE, ++ .get = get_ub_host_devaddr, ++ .set = set_ub_host_devaddr, ++}; ++ ++/* --- ub device guid --- */ ++static void ub_dev_get_guid(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ Property *prop = opaque; ++ UbGuid *guid = object_field_prop_ptr(obj, prop); ++ g_autofree char *guid_str = g_malloc0(UB_DEV_GUID_STRING_LENGTH + 1); ++ ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ visit_type_str(v, name, &guid_str, errp); ++} ++ ++static void ub_dev_set_guid(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ Property *prop = opaque; ++ UbGuid *guid = object_field_prop_ptr(obj, prop); ++ char *str; ++ if (!visit_type_str(v, name, &str, errp)) { ++ return; ++ } ++ ++ if (!ub_device_get_guid_from_str(guid, str)) { ++ qemu_log("guid set failed, current: %s," ++ " example: %s\n", str, GUID_STR_EXAMPLE); ++ error_setg(errp, "guid set failed, current: %s," ++ " example: %s\n", str, GUID_STR_EXAMPLE); ++ } ++ g_free(str); ++} ++ ++const PropertyInfo qdev_prop_ub_dev_guid = { ++ .name = "str", ++ .description = "guid(128bit) of the ub device. example: "GUID_STR_EXAMPLE, ++ .get = ub_dev_get_guid, ++ .set = ub_dev_set_guid, ++}; ++ ++/* --- ub device neighbor info --- */ ++static void ub_dev_get_neighbor_cmd(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ Property *prop = opaque; ++ UbPortInfo *port = object_field_prop_ptr(obj, prop); ++ g_autofree char *buffer = g_malloc0(64); ++ int i; ++ uint32_t input_port; ++ ++ if (sscanf(name, "port%u", &input_port) != 1) { ++ return; ++ } ++ ++ for (i = 0; i < port->port_num; i++) { ++ if (!port->port_info_exist) { ++ continue; ++ } ++ if (port->neighbors->local_port_idx == input_port) { ++ snprintf(buffer, 64, "remote %s port %u", ++ port->neighbors->neighbor_dev ? 
++                     port->neighbors->neighbor_dev->qdev.id : "",
++                     port->neighbors->neighbor_port_idx);
++            break;
++        }
++    }
++
++    visit_type_str(v, name, &buffer, errp);
++}
++
++static void ub_dev_set_neighbor_cmd(Object *obj, Visitor *v, const char *name,
++                                    void *opaque, Error **errp)
++{
++    Property *prop = opaque;
++    UbPortInfo *port = object_field_prop_ptr(obj, prop);
++    char *str;
++    char *origin;
++    uint64_t rport_id;
++    uint64_t lport_id;
++    gchar **substrings;
++
++    if (!visit_type_str(v, name, &str, errp)) {
++        return;
++    }
++
++    if (sscanf(name, "port%lu", &lport_id) != 1) {
++        qemu_log("failed to get port id %s\n", str);
++        g_free(str);
++        return;
++    }
++
++    substrings = g_strsplit(str, ":", 2);
++    if (!substrings || !substrings[0] || !substrings[1]) {
++        error_setg(errp, "remote neighbor info '%s' doesn't contain ':' ", str);
++        g_free(str);
++        g_strfreev(substrings);
++        return;
++    }
++
++    if (sscanf(substrings[1], "%lu", &rport_id) != 1 ||
++        !id_wellformed(substrings[0])) {
++        qemu_log("failed to parse remote neighbor info %s %s\n",
++                 substrings[0], substrings[1]);
++        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "remote device id", "an identifier");
++        error_append_hint(errp, "Identifiers consist of letters, digits, "
++                          "'-', '.', '_', starting with a letter. \n"
++                          "remote info:%s parse failed: %s %s\n",
++                          str, substrings[0], substrings[1]);
++        g_free(str);
++        g_strfreev(substrings);
++        return;
++    }
++
++    origin = port->neighbors_cmd;
++    if (origin != NULL) {
++        port->neighbors_cmd = g_strdup_printf("%s+%lu:%s", origin, lport_id, str);
++        g_free(origin);
++    } else {
++        port->neighbors_cmd = g_strdup_printf("%lu:%s", lport_id, str);
++    }
++
++    qemu_log("%s = %s\n", name, str);
++    g_free(str);
++    g_strfreev(substrings);
++}
++
++const PropertyInfo qdev_prop_ub_dev_neighbor_info = {
++    .name = "str",
++    .description = "port remote neighbor info. "
++                   "example: mydev1:1(id:portIdx)",
++    .get = ub_dev_get_neighbor_cmd,
++    .set = ub_dev_set_neighbor_cmd,
++};
++
++/* --- ub device port num --- */
++static void ub_dev_get_port_num(Object *obj, Visitor *v, const char *name,
++                                void *opaque, Error **errp)
++{
++    Property *prop = opaque;
++    UbPortInfo *port = object_field_prop_ptr(obj, prop);
++
++    visit_type_uint32(v, name, &port->port_num, errp);
++}
++
++static void ub_dev_set_port_num(Object *obj, Visitor *v, const char *name,
++                                void *opaque, Error **errp)
++{
++    Property *prop = opaque;
++    UbPortInfo *port = object_field_prop_ptr(obj, prop);
++    int port_num;
++    Error *local_err = NULL;
++
++    if (!visit_type_int32(v, name, &port_num, errp)) {
++        return;
++    }
++    if ((port_num <= 0) || (port_num > UB_DEV_MAX_NUM_OF_PORT)) {
++        error_setg(&local_err, "illegal port num: %d, set port num between 1 and %u",
++                   port_num, UB_DEV_MAX_NUM_OF_PORT);
++        error_propagate(errp, local_err);
++    }
++    port->port_num = port_num;
++}
++
++const PropertyInfo qdev_prop_ub_dev_port_num = {
++    .name = "str",
++    .description = "number of the ub device ports. 
", ++ .get = ub_dev_get_port_num, ++ .set = ub_dev_set_port_num, ++}; ++#endif // CONFIG_UB +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index b6d5f4beff..39fd4b7c77 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -1,5 +1,6 @@ + ub_ss = ss.source_set() + ub_ss.add(files( ++ 'ub.c', + 'ub_ubc.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +new file mode 100644 +index 0000000000..0c494fc9f9 +--- /dev/null ++++ b/hw/ub/ub.c +@@ -0,0 +1,162 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include "qemu/osdep.h" ++#include "qemu/module.h" ++#include "qemu/cutils.h" ++#include "qemu/range.h" ++#include "qemu/bitmap.h" ++#include "hw/arm/virt.h" ++#include "hw/qdev-properties.h" ++#include "hw/qdev-properties-system.h" ++#include "hw/ub/ub_common.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "qemu/log.h" ++#include "qapi/error.h" ++ ++static UBDevice *do_ub_register_device(UBDevice *ub_dev, const char *name, Error **errp) ++{ ++ return NULL; ++} ++ ++static void do_ub_unregister_device(UBDevice *ub_dev) ++{ ++} ++ ++static void ub_qdev_realize(DeviceState *qdev, Error **errp) ++{ ++ UBDevice *ub_dev = (UBDevice *)qdev; ++ UBDeviceClass *uc = UB_DEVICE_GET_CLASS(ub_dev); ++ Error *local_err = NULL; ++ ++ ub_dev->dev_type = UB_TYPE_UNINIT; ++ ub_dev->bus_instance_eid = UINT32_MAX; ++ ub_dev->rst_cnt = 0; ++ ub_dev->host_dev = false; ++ ub_dev = do_ub_register_device(ub_dev, ++ object_get_typename(OBJECT(qdev)), errp); ++ if (ub_dev == NULL) { ++ return; ++ } ++ ++ if (uc->realize) { ++ uc->realize(ub_dev, &local_err); ++ if (local_err) { ++ error_propagate(errp, local_err); ++ do_ub_unregister_device(ub_dev); ++ return; ++ } ++ } ++} ++ ++static void ub_qdev_unrealize(DeviceState *dev) ++{ ++} ++#define DECLARE_PORT_INFO(n) \ ++ DEFINE_PROP_UB_DEV_NEIGHBOR_INFO("port"#n, UBDevice, port), ++static Property ub_props[] = { ++ DEFINE_PROP_UINT32("eid", UBDevice, eid, 0), ++ DEFINE_PROP_UB_DEV_GUID("guid", UBDevice, guid), ++ DEFINE_PROP_UB_DEV_PORT_NUM("portnum", UBDevice, port), ++ /* max port num UB_DEV_MAX_NUM_OF_PORT(256) ++ * port id start with 0, so here set 255 ++ */ ++ LOOP(DECLARE_PORT_INFO, 255) ++ DEFINE_PROP_END_OF_LIST() ++}; ++ ++static void ub_device_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *k = DEVICE_CLASS(klass); ++ ++ k->realize = ub_qdev_realize; ++ k->unrealize = ub_qdev_unrealize; ++ k->bus_type = TYPE_UB_BUS; ++ device_class_set_props(k, ub_props); ++} ++ ++static const TypeInfo ub_device_type_info = { ++ .name = TYPE_UB_DEVICE, ++ .parent = TYPE_DEVICE, ++ .instance_size = sizeof(UBDevice), ++ .abstract = true, ++ .class_size = sizeof(UBDeviceClass), ++ .class_init = ub_device_class_init, ++}; ++ ++static void ub_register_types(void) ++{ ++ 
type_register_static(&ub_device_type_info); ++} ++ ++type_init(ub_register_types) ++ ++/* guid format: ++ * vendor:device id:version:type:rsv:sequence number ++ * 16 16 4 4 24 64 (bits) ++ */ ++void ub_device_get_str_from_guid(UbGuid *guid, char *guid_str, uint32_t str_len) ++{ ++ uint32_t len = UB_DEV_GUID_STRING_LENGTH + 1; ++ int ret; ++ ++ if (str_len < UB_DEV_GUID_STRING_LENGTH + 1) { ++ qemu_log("expect str_len(%u) < guid_str_len(%u), guid " ++ "to str will be truncate.\n", str_len, UB_DEV_GUID_STRING_LENGTH + 1); ++ len = str_len; ++ } ++ ret = snprintf(guid_str, len, "%04x-%04x-%01x-%01x-%06x-%016lx", ++ guid->vendor, guid->device_id, ++ guid->version, guid->type, guid->rsv, ++ (guid->seq_num & 0xFFFFFFFFFFFFFFFF)); ++ if (ret < 0) { ++ qemu_log("get str from ub device guid fail.\n"); ++ } ++} ++ ++#define UB_GUID_ELEMENT_NUM 6 ++bool ub_device_get_guid_from_str(UbGuid *guid, char *guid_str) ++{ ++ unsigned long seq_num; ++ unsigned int device_id; ++ unsigned int version; ++ unsigned int type; ++ unsigned int vendor; ++ unsigned int rsv; ++ int ret; ++ ++ if (strlen(guid_str) != UB_DEV_GUID_STRING_LENGTH) { ++ qemu_log("expect guid len is %d, but current guid len is %ld\n", ++ UB_DEV_GUID_STRING_LENGTH, strlen(guid_str)); ++ return false; ++ } ++ ++ ret = sscanf(guid_str, "%04x-%04x-%01x-%01x-%06x-%016lx", ++ &vendor, &device_id, &version, &type, &rsv, &seq_num); ++ if (ret != UB_GUID_ELEMENT_NUM) { ++ qemu_log("guid format is incorrect, example: " GUID_STR_EXAMPLE "\n"); ++ return false; ++ } ++ guid->vendor = vendor & 0xFFFF; ++ guid->type = type & 0x0F; ++ guid->version = version & 0x0F; ++ guid->device_id = device_id & 0xFFFF; ++ guid->rsv = rsv & 0xFFFFFF; ++ guid->seq_num = seq_num & 0xFFFFFFFFFFFFFFFF; ++ return true; ++} +diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h +index 63dcf69978..6d795e54d8 100644 +--- a/include/hw/qdev-properties-system.h ++++ b/include/hw/qdev-properties-system.h +@@ -27,6 +27,12 @@ extern const PropertyInfo qdev_prop_off_auto_pcibar; + extern const PropertyInfo qdev_prop_pcie_link_speed; + extern const PropertyInfo qdev_prop_pcie_link_width; + extern const PropertyInfo qdev_prop_cpus390entitlement; ++#ifdef CONFIG_UB ++extern const PropertyInfo qdev_prop_ub_host_devaddr; ++extern const PropertyInfo qdev_prop_ub_dev_guid; ++extern const PropertyInfo qdev_prop_ub_dev_neighbor_info; ++extern const PropertyInfo qdev_prop_ub_dev_port_num; ++#endif + + #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) +@@ -94,4 +100,18 @@ extern const PropertyInfo qdev_prop_cpus390entitlement; + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \ + CpuS390Entitlement) + ++#ifdef CONFIG_UB ++#define DEFINE_PROP_UB_HOST_DEVADDR(_n, _s, _f) \ ++ DEFINE_PROP(_n, _s, _f, qdev_prop_ub_host_devaddr, UBHostDeviceAddress) ++ ++#define DEFINE_PROP_UB_DEV_GUID(_n, _s, _f) \ ++ DEFINE_PROP(_n, _s, _f, qdev_prop_ub_dev_guid, UbGuid) ++ ++#define DEFINE_PROP_UB_DEV_NEIGHBOR_INFO(_n, _s, _f) \ ++ DEFINE_PROP(_n, _s, _f, qdev_prop_ub_dev_neighbor_info, UbPortInfo) ++ ++#define DEFINE_PROP_UB_DEV_PORT_NUM(_n, _s, _f) \ ++ DEFINE_PROP(_n, _s, _f, qdev_prop_ub_dev_port_num, UbPortInfo) ++#endif // CONFIG_UB ++ + #endif +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 4e3ed8a919..5cb6b2b207 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -25,6 +25,162 @@ + #define WORD_SIZE 2 + #define DWORD_SIZE 4 + ++#define UINT16_MASK 0x0000FFFF ++ ++#define 
UB_DEV_NAME_LEN 64 ++#define UB_NUM_REGIONS (VFIO_UB_NUM_REGIONS - 1) /* Exclude the config region */ ++#define UB_SUPPORT_MIN_EID 1 ++#define UB_SUPPORT_MAX_EID 0xFFFFF ++#define UB_GUID_BASE_CODE_MASK 0x00FF ++ ++typedef struct UBIORegion { ++ uint64_t addr; /* current UB mapping address. -1 means not mapped */ ++#define UB_ER_UNMAPPED (~(uint64_t)0) ++ uint64_t size; ++ MemoryRegion *memory; ++ MemoryRegion *address_space; ++} UBIORegion; ++ ++#define GUID_STR_EXAMPLE "e0fc-a120-0-2-000000-0000000000000000" \ ++ "(Vendor-DeviceId-Version-Type-Rsv-SequenceNumber)" ++typedef struct __attribute__ ((__packed__)) UbGuid { ++ unsigned long seq_num : 64; ++ unsigned long rsv : 24; ++ unsigned int type : 4; ++ unsigned int version : 4; ++ unsigned int device_id : 16; ++ unsigned int vendor : 16; ++} UbGuid; ++#define UB_DEV_GUID_STRING_LENGTH 37 ++void ub_device_get_str_from_guid(UbGuid *guid, char *guid_str, uint32_t str_len); ++bool ub_device_get_guid_from_str(UbGuid *guid, char *guid_str); ++ ++typedef struct UBHostDeviceAddress { ++ UbGuid guid; ++} UBHostDeviceAddress; ++ ++enum UbGUIDType { ++ UB_GUID_TYPE_UNINIT = -1, ++ UB_GUID_TYPE_BUS_INSTANCE = 0x0, ++ UB_GUID_TYPE_BUS_CONTROLLER = 0x1, ++ UB_GUID_TYPE_IBUS_CONTROLLER = 0x2, ++ UB_GUID_TYPE_SWITCH = 0x3, ++ UB_GUID_TYPE_ISWITCH = 0x4, ++}; ++ ++enum UbGUIDBaseCode { ++ UB_GUID_BASE_INSTANCE = 0, ++ UB_GUID_BASE_SWITCH = 4, ++}; ++ ++enum UbDeviceType { ++ UB_TYPE_UNINIT = -1, ++ UB_TYPE_BUS_INSTANCE, ++ UB_TYPE_DEVICE, ++ UB_TYPE_IDEVICE, ++ UB_TYPE_SWITCH, ++ UB_TYPE_ISWITCH, ++ UB_TYPE_IBUS_CONTROLLER, ++}; ++ ++static inline const char *ub_dev_get_type_str(enum UbDeviceType type) ++{ ++ switch (type) { ++ case UB_TYPE_UNINIT: ++ return "type_uninit"; ++ case UB_TYPE_BUS_INSTANCE: ++ return "type_businstance"; ++ case UB_TYPE_DEVICE: ++ return "type_device"; ++ case UB_TYPE_IDEVICE: ++ return "type_idevice"; ++ case UB_TYPE_SWITCH: ++ return "type_switch"; ++ case UB_TYPE_ISWITCH: ++ return "type_iswitch"; ++ case UB_TYPE_IBUS_CONTROLLER: ++ return "type_ibus_controller"; ++ default: ++ return "type_unknown"; ++ } ++} ++ ++/* ++ * the reserved address space in config space supports a maximum of 4094 ports, ++ * current ubus driver support max 256 ports. 
++ * */ ++#define UB_DEV_MAX_NUM_OF_PORT 256 ++#define UB_DEV_CONFIG_SPACE_PORT_SIZE 0x40000UL // 256KiB ++#define UB_DEV_NUM_OF_CFG 0x2UL ++#define UB_DEV_CONFIG_SPACE_CFG_SIZE 0x40000UL // 256KiB ++#define UB_DEV_CONFIG_SPACE_ROUTE_TABLE_SIZE 0x40000000UL // 1GiB ++#define UB_DEV_CONFIG_SPACE_ROUTE_TABLE_START 0x3C0000000UL // 1GiB ++#define UB_DEV_CONFIG_SPACE_TOTAL_SIZE \ ++ (UB_DEV_CONFIG_SPACE_ROUTE_TABLE_START + UB_DEV_CONFIG_SPACE_ROUTE_TABLE_SIZE) // according to frontend code ++ ++#define UB_DEV_ID_LEN 64 ++typedef struct NeighborInfo { ++ union { ++ char neighbor_id[UB_DEV_ID_LEN]; ++ UBDevice *neighbor_dev; ++ }; ++ uint32_t local_port_idx; ++ uint32_t neighbor_port_idx; ++} NeighborInfo; ++ ++typedef struct UbPortInfo { ++ uint32_t port_num; ++ char *neighbors_cmd; ++ NeighborInfo *neighbors; ++ bool port_info_exist; ++} UbPortInfo; ++ ++typedef void UBConfigReadFunc(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask); ++typedef void UBConfigWriteFunc(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask); ++ ++struct UBDevice { ++ DeviceState qdev; ++ /* UB config space */ ++ uint8_t *config; ++ /* UB config space right mask */ ++ uint8_t *wmask; ++ uint8_t *w1cmask; ++ enum UbDeviceType dev_type; ++ char name[UB_DEV_NAME_LEN]; ++ uint32_t eid; ++ uint32_t bus_instance_eid; ++ uint32_t cna; ++ uint32_t ue_idx; ++ uint32_t rst_cnt; ++ bool host_dev; ++ UbGuid guid; ++ UbPortInfo port; ++ UBIORegion io_regions[UB_NUM_REGIONS]; ++ UBConfigReadFunc *config_read; ++ UBConfigWriteFunc *config_write; ++ int (* bus_instance_verify)(UBDevice *dev, Error **errp); ++ ++ QLIST_ENTRY(UBDevice) node; ++}; ++ ++typedef void UBUnregisterFunc(UBDevice *dev); ++ ++typedef struct UBDeviceClass { ++ DeviceClass parent_class; ++ ++ void (*realize)(UBDevice *dev, Error **errp); ++ UBUnregisterFunc *exit; ++ UBConfigReadFunc *config_read; ++ UBConfigWriteFunc *config_write; ++} UBDeviceClass; ++ ++#define TYPE_UB_DEVICE "ub-device" ++DECLARE_OBJ_CHECKERS(UBDevice, UBDeviceClass, ++ UB_DEVICE, TYPE_UB_DEVICE) ++ ++ + static inline void ub_set_byte(uint8_t *config, uint8_t val) + { + *config = val; +diff --git a/include/hw/ub/ub_bus.h b/include/hw/ub/ub_bus.h +new file mode 100644 +index 0000000000..ef78305cb0 +--- /dev/null ++++ b/include/hw/ub/ub_bus.h +@@ -0,0 +1,39 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef UB_BUS_H ++#define UB_BUS_H ++ ++#include "hw/ub/ub.h" ++ ++struct UBBusClass { ++ /* < private > */ ++ BusClass parent_class; ++ /* < public > */ ++}; ++ ++typedef QLIST_HEAD(, UBDevice) UBDeviceList; ++struct UBBus { ++ BusState qbus; ++ UBDeviceList devices; ++ MemoryRegion *address_space_mem; ++}; ++ ++#define TYPE_UB_BUS "UB_BUS" ++OBJECT_DECLARE_TYPE(UBBus, UBBusClass, UB_BUS) ++ ++#endif +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +new file mode 100644 +index 0000000000..d52dc7e651 +--- /dev/null ++++ b/include/hw/ub/ub_common.h +@@ -0,0 +1,288 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_COMMON_H ++#define UB_COMMON_H ++ ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_bus.h" ++ ++/* You can use the following macro to execute a ++ * repeated snippet of code ++ */ ++#define CMD(macro, arg) macro(arg) ++#define LOOP0(macro) CMD(macro, 0) ++#define LOOP1(macro) LOOP0(macro) CMD(macro, 1) ++#define LOOP2(macro) LOOP1(macro) CMD(macro, 2) ++#define LOOP3(macro) LOOP2(macro) CMD(macro, 3) ++#define LOOP4(macro) LOOP3(macro) CMD(macro, 4) ++#define LOOP5(macro) LOOP4(macro) CMD(macro, 5) ++#define LOOP6(macro) LOOP5(macro) CMD(macro, 6) ++#define LOOP7(macro) LOOP6(macro) CMD(macro, 7) ++#define LOOP8(macro) LOOP7(macro) CMD(macro, 8) ++#define LOOP9(macro) LOOP8(macro) CMD(macro, 9) ++#define LOOP10(macro) LOOP9(macro) CMD(macro, 10) ++#define LOOP11(macro) LOOP10(macro) CMD(macro, 11) ++#define LOOP12(macro) LOOP11(macro) CMD(macro, 12) ++#define LOOP13(macro) LOOP12(macro) CMD(macro, 13) ++#define LOOP14(macro) LOOP13(macro) CMD(macro, 14) ++#define LOOP15(macro) LOOP14(macro) CMD(macro, 15) ++#define LOOP16(macro) LOOP15(macro) CMD(macro, 16) ++#define LOOP17(macro) LOOP16(macro) CMD(macro, 17) ++#define LOOP18(macro) LOOP17(macro) CMD(macro, 18) ++#define LOOP19(macro) LOOP18(macro) CMD(macro, 19) ++#define LOOP20(macro) LOOP19(macro) CMD(macro, 20) ++#define LOOP21(macro) LOOP20(macro) CMD(macro, 21) ++#define LOOP22(macro) LOOP21(macro) CMD(macro, 22) ++#define LOOP23(macro) LOOP22(macro) CMD(macro, 23) ++#define LOOP24(macro) LOOP23(macro) CMD(macro, 24) ++#define LOOP25(macro) LOOP24(macro) CMD(macro, 25) ++#define LOOP26(macro) LOOP25(macro) CMD(macro, 26) ++#define LOOP27(macro) LOOP26(macro) CMD(macro, 27) ++#define LOOP28(macro) LOOP27(macro) CMD(macro, 28) ++#define LOOP29(macro) LOOP28(macro) CMD(macro, 29) ++#define LOOP30(macro) LOOP29(macro) CMD(macro, 30) ++#define LOOP31(macro) LOOP30(macro) CMD(macro, 31) ++#define LOOP32(macro) LOOP31(macro) CMD(macro, 32) ++#define LOOP33(macro) LOOP32(macro) CMD(macro, 33) ++#define LOOP34(macro) LOOP33(macro) CMD(macro, 34) ++#define LOOP35(macro) LOOP34(macro) CMD(macro, 35) ++#define LOOP36(macro) LOOP35(macro) CMD(macro, 36) ++#define LOOP37(macro) LOOP36(macro) 
CMD(macro, 37) ++#define LOOP38(macro) LOOP37(macro) CMD(macro, 38) ++#define LOOP39(macro) LOOP38(macro) CMD(macro, 39) ++#define LOOP40(macro) LOOP39(macro) CMD(macro, 40) ++#define LOOP41(macro) LOOP40(macro) CMD(macro, 41) ++#define LOOP42(macro) LOOP41(macro) CMD(macro, 42) ++#define LOOP43(macro) LOOP42(macro) CMD(macro, 43) ++#define LOOP44(macro) LOOP43(macro) CMD(macro, 44) ++#define LOOP45(macro) LOOP44(macro) CMD(macro, 45) ++#define LOOP46(macro) LOOP45(macro) CMD(macro, 46) ++#define LOOP47(macro) LOOP46(macro) CMD(macro, 47) ++#define LOOP48(macro) LOOP47(macro) CMD(macro, 48) ++#define LOOP49(macro) LOOP48(macro) CMD(macro, 49) ++#define LOOP50(macro) LOOP49(macro) CMD(macro, 50) ++#define LOOP51(macro) LOOP50(macro) CMD(macro, 51) ++#define LOOP52(macro) LOOP51(macro) CMD(macro, 52) ++#define LOOP53(macro) LOOP52(macro) CMD(macro, 53) ++#define LOOP54(macro) LOOP53(macro) CMD(macro, 54) ++#define LOOP55(macro) LOOP54(macro) CMD(macro, 55) ++#define LOOP56(macro) LOOP55(macro) CMD(macro, 56) ++#define LOOP57(macro) LOOP56(macro) CMD(macro, 57) ++#define LOOP58(macro) LOOP57(macro) CMD(macro, 58) ++#define LOOP59(macro) LOOP58(macro) CMD(macro, 59) ++#define LOOP60(macro) LOOP59(macro) CMD(macro, 60) ++#define LOOP61(macro) LOOP60(macro) CMD(macro, 61) ++#define LOOP62(macro) LOOP61(macro) CMD(macro, 62) ++#define LOOP63(macro) LOOP62(macro) CMD(macro, 63) ++#define LOOP64(macro) LOOP63(macro) CMD(macro, 64) ++#define LOOP65(macro) LOOP64(macro) CMD(macro, 65) ++#define LOOP66(macro) LOOP65(macro) CMD(macro, 66) ++#define LOOP67(macro) LOOP66(macro) CMD(macro, 67) ++#define LOOP68(macro) LOOP67(macro) CMD(macro, 68) ++#define LOOP69(macro) LOOP68(macro) CMD(macro, 69) ++#define LOOP70(macro) LOOP69(macro) CMD(macro, 70) ++#define LOOP71(macro) LOOP70(macro) CMD(macro, 71) ++#define LOOP72(macro) LOOP71(macro) CMD(macro, 72) ++#define LOOP73(macro) LOOP72(macro) CMD(macro, 73) ++#define LOOP74(macro) LOOP73(macro) CMD(macro, 74) ++#define LOOP75(macro) LOOP74(macro) CMD(macro, 75) ++#define LOOP76(macro) LOOP75(macro) CMD(macro, 76) ++#define LOOP77(macro) LOOP76(macro) CMD(macro, 77) ++#define LOOP78(macro) LOOP77(macro) CMD(macro, 78) ++#define LOOP79(macro) LOOP78(macro) CMD(macro, 79) ++#define LOOP80(macro) LOOP79(macro) CMD(macro, 80) ++#define LOOP81(macro) LOOP80(macro) CMD(macro, 81) ++#define LOOP82(macro) LOOP81(macro) CMD(macro, 82) ++#define LOOP83(macro) LOOP82(macro) CMD(macro, 83) ++#define LOOP84(macro) LOOP83(macro) CMD(macro, 84) ++#define LOOP85(macro) LOOP84(macro) CMD(macro, 85) ++#define LOOP86(macro) LOOP85(macro) CMD(macro, 86) ++#define LOOP87(macro) LOOP86(macro) CMD(macro, 87) ++#define LOOP88(macro) LOOP87(macro) CMD(macro, 88) ++#define LOOP89(macro) LOOP88(macro) CMD(macro, 89) ++#define LOOP90(macro) LOOP89(macro) CMD(macro, 90) ++#define LOOP91(macro) LOOP90(macro) CMD(macro, 91) ++#define LOOP92(macro) LOOP91(macro) CMD(macro, 92) ++#define LOOP93(macro) LOOP92(macro) CMD(macro, 93) ++#define LOOP94(macro) LOOP93(macro) CMD(macro, 94) ++#define LOOP95(macro) LOOP94(macro) CMD(macro, 95) ++#define LOOP96(macro) LOOP95(macro) CMD(macro, 96) ++#define LOOP97(macro) LOOP96(macro) CMD(macro, 97) ++#define LOOP98(macro) LOOP97(macro) CMD(macro, 98) ++#define LOOP99(macro) LOOP98(macro) CMD(macro, 99) ++#define LOOP100(macro) LOOP99(macro) CMD(macro, 100) ++#define LOOP101(macro) LOOP100(macro) CMD(macro, 101) ++#define LOOP102(macro) LOOP101(macro) CMD(macro, 102) ++#define LOOP103(macro) LOOP102(macro) CMD(macro, 103) ++#define LOOP104(macro) 
LOOP103(macro) CMD(macro, 104) ++#define LOOP105(macro) LOOP104(macro) CMD(macro, 105) ++#define LOOP106(macro) LOOP105(macro) CMD(macro, 106) ++#define LOOP107(macro) LOOP106(macro) CMD(macro, 107) ++#define LOOP108(macro) LOOP107(macro) CMD(macro, 108) ++#define LOOP109(macro) LOOP108(macro) CMD(macro, 109) ++#define LOOP110(macro) LOOP109(macro) CMD(macro, 110) ++#define LOOP111(macro) LOOP110(macro) CMD(macro, 111) ++#define LOOP112(macro) LOOP111(macro) CMD(macro, 112) ++#define LOOP113(macro) LOOP112(macro) CMD(macro, 113) ++#define LOOP114(macro) LOOP113(macro) CMD(macro, 114) ++#define LOOP115(macro) LOOP114(macro) CMD(macro, 115) ++#define LOOP116(macro) LOOP115(macro) CMD(macro, 116) ++#define LOOP117(macro) LOOP116(macro) CMD(macro, 117) ++#define LOOP118(macro) LOOP117(macro) CMD(macro, 118) ++#define LOOP119(macro) LOOP118(macro) CMD(macro, 119) ++#define LOOP120(macro) LOOP119(macro) CMD(macro, 120) ++#define LOOP121(macro) LOOP120(macro) CMD(macro, 121) ++#define LOOP122(macro) LOOP121(macro) CMD(macro, 122) ++#define LOOP123(macro) LOOP122(macro) CMD(macro, 123) ++#define LOOP124(macro) LOOP123(macro) CMD(macro, 124) ++#define LOOP125(macro) LOOP124(macro) CMD(macro, 125) ++#define LOOP126(macro) LOOP125(macro) CMD(macro, 126) ++#define LOOP127(macro) LOOP126(macro) CMD(macro, 127) ++#define LOOP128(macro) LOOP127(macro) CMD(macro, 128) ++#define LOOP129(macro) LOOP128(macro) CMD(macro, 129) ++#define LOOP130(macro) LOOP129(macro) CMD(macro, 130) ++#define LOOP131(macro) LOOP130(macro) CMD(macro, 131) ++#define LOOP132(macro) LOOP131(macro) CMD(macro, 132) ++#define LOOP133(macro) LOOP132(macro) CMD(macro, 133) ++#define LOOP134(macro) LOOP133(macro) CMD(macro, 134) ++#define LOOP135(macro) LOOP134(macro) CMD(macro, 135) ++#define LOOP136(macro) LOOP135(macro) CMD(macro, 136) ++#define LOOP137(macro) LOOP136(macro) CMD(macro, 137) ++#define LOOP138(macro) LOOP137(macro) CMD(macro, 138) ++#define LOOP139(macro) LOOP138(macro) CMD(macro, 139) ++#define LOOP140(macro) LOOP139(macro) CMD(macro, 140) ++#define LOOP141(macro) LOOP140(macro) CMD(macro, 141) ++#define LOOP142(macro) LOOP141(macro) CMD(macro, 142) ++#define LOOP143(macro) LOOP142(macro) CMD(macro, 143) ++#define LOOP144(macro) LOOP143(macro) CMD(macro, 144) ++#define LOOP145(macro) LOOP144(macro) CMD(macro, 145) ++#define LOOP146(macro) LOOP145(macro) CMD(macro, 146) ++#define LOOP147(macro) LOOP146(macro) CMD(macro, 147) ++#define LOOP148(macro) LOOP147(macro) CMD(macro, 148) ++#define LOOP149(macro) LOOP148(macro) CMD(macro, 149) ++#define LOOP150(macro) LOOP149(macro) CMD(macro, 150) ++#define LOOP151(macro) LOOP150(macro) CMD(macro, 151) ++#define LOOP152(macro) LOOP151(macro) CMD(macro, 152) ++#define LOOP153(macro) LOOP152(macro) CMD(macro, 153) ++#define LOOP154(macro) LOOP153(macro) CMD(macro, 154) ++#define LOOP155(macro) LOOP154(macro) CMD(macro, 155) ++#define LOOP156(macro) LOOP155(macro) CMD(macro, 156) ++#define LOOP157(macro) LOOP156(macro) CMD(macro, 157) ++#define LOOP158(macro) LOOP157(macro) CMD(macro, 158) ++#define LOOP159(macro) LOOP158(macro) CMD(macro, 159) ++#define LOOP160(macro) LOOP159(macro) CMD(macro, 160) ++#define LOOP161(macro) LOOP160(macro) CMD(macro, 161) ++#define LOOP162(macro) LOOP161(macro) CMD(macro, 162) ++#define LOOP163(macro) LOOP162(macro) CMD(macro, 163) ++#define LOOP164(macro) LOOP163(macro) CMD(macro, 164) ++#define LOOP165(macro) LOOP164(macro) CMD(macro, 165) ++#define LOOP166(macro) LOOP165(macro) CMD(macro, 166) ++#define LOOP167(macro) LOOP166(macro) CMD(macro, 
167) ++#define LOOP168(macro) LOOP167(macro) CMD(macro, 168) ++#define LOOP169(macro) LOOP168(macro) CMD(macro, 169) ++#define LOOP170(macro) LOOP169(macro) CMD(macro, 170) ++#define LOOP171(macro) LOOP170(macro) CMD(macro, 171) ++#define LOOP172(macro) LOOP171(macro) CMD(macro, 172) ++#define LOOP173(macro) LOOP172(macro) CMD(macro, 173) ++#define LOOP174(macro) LOOP173(macro) CMD(macro, 174) ++#define LOOP175(macro) LOOP174(macro) CMD(macro, 175) ++#define LOOP176(macro) LOOP175(macro) CMD(macro, 176) ++#define LOOP177(macro) LOOP176(macro) CMD(macro, 177) ++#define LOOP178(macro) LOOP177(macro) CMD(macro, 178) ++#define LOOP179(macro) LOOP178(macro) CMD(macro, 179) ++#define LOOP180(macro) LOOP179(macro) CMD(macro, 180) ++#define LOOP181(macro) LOOP180(macro) CMD(macro, 181) ++#define LOOP182(macro) LOOP181(macro) CMD(macro, 182) ++#define LOOP183(macro) LOOP182(macro) CMD(macro, 183) ++#define LOOP184(macro) LOOP183(macro) CMD(macro, 184) ++#define LOOP185(macro) LOOP184(macro) CMD(macro, 185) ++#define LOOP186(macro) LOOP185(macro) CMD(macro, 186) ++#define LOOP187(macro) LOOP186(macro) CMD(macro, 187) ++#define LOOP188(macro) LOOP187(macro) CMD(macro, 188) ++#define LOOP189(macro) LOOP188(macro) CMD(macro, 189) ++#define LOOP190(macro) LOOP189(macro) CMD(macro, 190) ++#define LOOP191(macro) LOOP190(macro) CMD(macro, 191) ++#define LOOP192(macro) LOOP191(macro) CMD(macro, 192) ++#define LOOP193(macro) LOOP192(macro) CMD(macro, 193) ++#define LOOP194(macro) LOOP193(macro) CMD(macro, 194) ++#define LOOP195(macro) LOOP194(macro) CMD(macro, 195) ++#define LOOP196(macro) LOOP195(macro) CMD(macro, 196) ++#define LOOP197(macro) LOOP196(macro) CMD(macro, 197) ++#define LOOP198(macro) LOOP197(macro) CMD(macro, 198) ++#define LOOP199(macro) LOOP198(macro) CMD(macro, 199) ++#define LOOP200(macro) LOOP199(macro) CMD(macro, 200) ++#define LOOP201(macro) LOOP200(macro) CMD(macro, 201) ++#define LOOP202(macro) LOOP201(macro) CMD(macro, 202) ++#define LOOP203(macro) LOOP202(macro) CMD(macro, 203) ++#define LOOP204(macro) LOOP203(macro) CMD(macro, 204) ++#define LOOP205(macro) LOOP204(macro) CMD(macro, 205) ++#define LOOP206(macro) LOOP205(macro) CMD(macro, 206) ++#define LOOP207(macro) LOOP206(macro) CMD(macro, 207) ++#define LOOP208(macro) LOOP207(macro) CMD(macro, 208) ++#define LOOP209(macro) LOOP208(macro) CMD(macro, 209) ++#define LOOP210(macro) LOOP209(macro) CMD(macro, 210) ++#define LOOP211(macro) LOOP210(macro) CMD(macro, 211) ++#define LOOP212(macro) LOOP211(macro) CMD(macro, 212) ++#define LOOP213(macro) LOOP212(macro) CMD(macro, 213) ++#define LOOP214(macro) LOOP213(macro) CMD(macro, 214) ++#define LOOP215(macro) LOOP214(macro) CMD(macro, 215) ++#define LOOP216(macro) LOOP215(macro) CMD(macro, 216) ++#define LOOP217(macro) LOOP216(macro) CMD(macro, 217) ++#define LOOP218(macro) LOOP217(macro) CMD(macro, 218) ++#define LOOP219(macro) LOOP218(macro) CMD(macro, 219) ++#define LOOP220(macro) LOOP219(macro) CMD(macro, 220) ++#define LOOP221(macro) LOOP220(macro) CMD(macro, 221) ++#define LOOP222(macro) LOOP221(macro) CMD(macro, 222) ++#define LOOP223(macro) LOOP222(macro) CMD(macro, 223) ++#define LOOP224(macro) LOOP223(macro) CMD(macro, 224) ++#define LOOP225(macro) LOOP224(macro) CMD(macro, 225) ++#define LOOP226(macro) LOOP225(macro) CMD(macro, 226) ++#define LOOP227(macro) LOOP226(macro) CMD(macro, 227) ++#define LOOP228(macro) LOOP227(macro) CMD(macro, 228) ++#define LOOP229(macro) LOOP228(macro) CMD(macro, 229) ++#define LOOP230(macro) LOOP229(macro) CMD(macro, 230) ++#define 
LOOP231(macro) LOOP230(macro) CMD(macro, 231) ++#define LOOP232(macro) LOOP231(macro) CMD(macro, 232) ++#define LOOP233(macro) LOOP232(macro) CMD(macro, 233) ++#define LOOP234(macro) LOOP233(macro) CMD(macro, 234) ++#define LOOP235(macro) LOOP234(macro) CMD(macro, 235) ++#define LOOP236(macro) LOOP235(macro) CMD(macro, 236) ++#define LOOP237(macro) LOOP236(macro) CMD(macro, 237) ++#define LOOP238(macro) LOOP237(macro) CMD(macro, 238) ++#define LOOP239(macro) LOOP238(macro) CMD(macro, 239) ++#define LOOP240(macro) LOOP239(macro) CMD(macro, 240) ++#define LOOP241(macro) LOOP240(macro) CMD(macro, 241) ++#define LOOP242(macro) LOOP241(macro) CMD(macro, 242) ++#define LOOP243(macro) LOOP242(macro) CMD(macro, 243) ++#define LOOP244(macro) LOOP243(macro) CMD(macro, 244) ++#define LOOP245(macro) LOOP244(macro) CMD(macro, 245) ++#define LOOP246(macro) LOOP245(macro) CMD(macro, 246) ++#define LOOP247(macro) LOOP246(macro) CMD(macro, 247) ++#define LOOP248(macro) LOOP247(macro) CMD(macro, 248) ++#define LOOP249(macro) LOOP248(macro) CMD(macro, 249) ++#define LOOP250(macro) LOOP249(macro) CMD(macro, 250) ++#define LOOP251(macro) LOOP250(macro) CMD(macro, 251) ++#define LOOP252(macro) LOOP251(macro) CMD(macro, 252) ++#define LOOP253(macro) LOOP252(macro) CMD(macro, 253) ++#define LOOP254(macro) LOOP253(macro) CMD(macro, 254) ++#define LOOP255(macro) LOOP254(macro) CMD(macro, 255) ++#define LOOP_HELPER(macro, n) LOOP##n(macro) ++#define LOOP(macro, n) LOOP_HELPER(macro, n) ++ ++#endif +diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h +index 5abdbc3874..18d0f21ee2 100644 +--- a/include/qemu/typedefs.h ++++ b/include/qemu/typedefs.h +@@ -138,6 +138,9 @@ typedef struct Visitor Visitor; + typedef struct VMChangeStateEntry VMChangeStateEntry; + typedef struct VMStateDescription VMStateDescription; + ++/* UB typedef */ ++typedef struct UBDevice UBDevice; ++ + /* + * Pointer types + * Such typedefs should be limited to cases where the typedef's users +diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h +index 5b1e2871af..7fb5ddba9e 100644 +--- a/linux-headers/linux/vfio.h ++++ b/linux-headers/linux/vfio.h +@@ -1858,4 +1858,19 @@ struct vfio_iommu_spapr_tce_remove { + + /* ***************************************************************** */ + ++/* ub irq types */ ++enum { ++ VFIO_UB_MSIX_IRQ_INDEX, ++ VFIO_UB_NUM_IRQS ++}; ++ ++/* ub regions types */ ++enum { ++ VFIO_UB_REGION0_INDEX = 0, ++ VFIO_UB_REGION1_INDEX, ++ VFIO_UB_REGION2_INDEX, ++ VFIO_UB_CONFIG_REGION_INDEX, ++ VFIO_UB_NUM_REGIONS ++}; ++ + #endif /* VFIO_H */ +-- +2.33.0 + diff --git a/ub-realize-detail-for-vfio-ub-device.patch b/ub-realize-detail-for-vfio-ub-device.patch new file mode 100644 index 0000000000000000000000000000000000000000..25a3448146c9b6cc4936bd8b1dd04248f9cb440a --- /dev/null +++ b/ub-realize-detail-for-vfio-ub-device.patch @@ -0,0 +1,1234 @@ +From f30bccdf965e2cbbe981e12c76e18ef75c9e1440 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 14:12:34 +0800 +Subject: [PATCH 2/7] ub: realize detail for vfio-ub device + +realize vfio-ub device, now can use following command to config vfio-ub +device for guest: + -device vfio-ub,eid=xxx,guid=xxx,host=xxx,portnum=xxx + +Signed-off-by: caojinhuahw +--- + hw/vfio/trace-events | 8 + + hw/vfio/ub.c | 1146 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 1154 insertions(+) + +diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events +index 8fdde54456..4f1050843a 100644 +--- a/hw/vfio/trace-events ++++ b/hw/vfio/trace-events +@@ 
-175,3 +175,11 @@ iommufd_cdev_fail_attach_existing_container(const char *msg) " %s" + iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d" + iommufd_cdev_device_info(char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d" + iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d" ++ ++#ub.c ++vfio_ub_read_config(uint64_t offset, uint32_t emu_val, uint32_t phys_val, uint32_t val) "offset: 0x%lx, emu_val: 0x%x, phys_val: 0x%x, ret_val: 0x%x" ++vfio_ub_write_config(uint64_t offset, uint32_t val, uint32_t dw_mask, uint32_t phys_val) "offset: 0x%lx, val: 0x%x, dw_mask: 0x%x, phys_val: 0x%x" ++vfio_ub_write_config_ioregion(int id, uint64_t old_addr, uint64_t new_addr, uint64_t size) "io_regions: %d, old_addr: 0x%lx, new_addr: 0x%lx, size: 0x%lx" ++vfio_ub_write_config_ers(int id, uint64_t offset, uint64_t size, uint32_t flags, uint64_t page_size) "id: %d, offset: 0x%lx, size: 0x%lx, flags: 0x%x, page_size: 0x%lx" ++vfio_ub_write_config_int_cap_en(uint32_t val, int was_enabled, int is_enabled) "val: %u, was_enabled: %d, is_enabled: %d" ++vfio_ub_write_config_int_cap_mask(uint32_t val, int was_masked, int is_masked) "val: %u, was_masked: %d, is_masked: %d" +diff --git a/hw/vfio/ub.c b/hw/vfio/ub.c +index 6cc999f0ab..cfbf9eef3c 100644 +--- a/hw/vfio/ub.c ++++ b/hw/vfio/ub.c +@@ -43,6 +43,8 @@ + #include "sysemu/iommufd.h" + #include "trace.h" + ++static KVMRouteChange vfio_route_change; ++ + static Property vfio_ub_dev_properties[] = { + DEFINE_PROP_UB_HOST_DEVADDR("host", VFIOUBDevice, host), + #ifdef CONFIG_IOMMUFD +@@ -51,6 +53,10 @@ static Property vfio_ub_dev_properties[] = { + #endif + DEFINE_PROP_END_OF_LIST(), + }; ++static void vfio_disable_interrupts(VFIOUBDevice *vdev); ++static void vfio_usi_disable(VFIOUBDevice *vdev); ++static void vfio_ub_write_config(UBDevice *dev, uint64_t offset, ++ uint32_t *val, uint32_t dw_mask); + + static bool vfio_ub_needed(void *opaque) + { +@@ -70,24 +76,1153 @@ static const VMStateDescription vfio_ub_vmstate = { + + static void vfio_ub_reset(DeviceState *dev) + { ++ VFIOUBDevice *vdev = VFIO_UB(dev); ++ uint32_t val = 0; ++ ++ vfio_disable_interrupts(vdev); ++ ++ if (!ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { ++ qemu_log("%s execute ELR done\n", dev->id); ++ } else { ++ qemu_log("%s execute ELR failed\n", dev->id); ++ } ++ ++ /* need after ELR */ ++ vfio_ub_write_config(&vdev->udev, UB_CFG1_DEV_TOKEN_ID_OFFSET, ++ &val, UB_TOKEN_ID_MASK); ++ vfio_ub_write_config(&vdev->udev, UB_CFG1_DEV_RS_ACCESS_EN_OFFSET, ++ &val, UB_DEV_RS_ACCESS_EN_MASK); ++ vfio_ub_write_config(&vdev->udev, UB_CFG1_BUS_ACCESS_EN_OFFSET, ++ &val, UB_BUS_ACCESS_EN_MASK); ++ qemu_log("ub device(%s %s) clear 'dev_token_id'," ++ "'bus_access_en' and 'dev_rs_access_en'\n", ++ vdev->udev.name, vdev->udev.qdev.id); ++} ++ ++static void vfio_ub_compute_needs_reset(VFIODevice *vbasedev) ++{ ++ if (!vbasedev->reset_works) { ++ vbasedev->needs_reset = true; ++ } ++} ++ ++static int vfio_ub_hot_reset_multi(VFIODevice *vbasedev) ++{ ++ /* do nothing now */ ++ return 0; ++} ++ ++static void vfio_ub_eoi(VFIODevice *vbasedev) ++{ ++ /* do nothing now */ ++} ++ ++static Object *vfio_ub_get_object(VFIODevice *vbasedev) ++{ ++ VFIOUBDevice *vdev = container_of(vbasedev, VFIOUBDevice, vbasedev); ++ ++ return OBJECT(vdev); ++} ++ ++ ++static void vfio_ub_save_config(VFIODevice *vbasedev, QEMUFile *f) ++{ ++ /* do nothing now 
*/ ++} ++ ++static int vfio_ub_load_config(VFIODevice *vbasedev, QEMUFile *f) ++{ ++ /* do nothing now */ ++ return 0; ++} ++ ++static VFIODeviceOps vfio_ub_ops = { ++ .vfio_compute_needs_reset = vfio_ub_compute_needs_reset, ++ .vfio_hot_reset_multi = vfio_ub_hot_reset_multi, ++ .vfio_eoi = vfio_ub_eoi, ++ .vfio_get_object = vfio_ub_get_object, ++ .vfio_save_config = vfio_ub_save_config, ++ .vfio_load_config = vfio_ub_load_config, ++}; ++ ++static void vfio_populate_device(VFIOUBDevice *vdev, Error **errp) ++{ ++ VFIODevice *vbasedev = &vdev->vbasedev; ++ struct vfio_region_info *reg_info; ++ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; ++ int i; ++ int ret = -1; ++ ++ if (vbasedev->num_regions < VFIO_UB_NUM_REGIONS) { ++ error_setg(errp, "unexpected number of io regions %u", ++ vbasedev->num_regions); ++ return; ++ } ++ qemu_log("vbasedev->num_irqs %u vbasedev->num_regions %u\n", ++ vbasedev->num_irqs, vbasedev->num_regions); ++ ++ for (i = VFIO_UB_REGION0_INDEX; i < VFIO_UB_CONFIG_REGION_INDEX; i++) { ++ char *name = g_strdup_printf("%s-ERS%d", vbasedev->name, i); ++ ++ ret = vfio_region_setup(OBJECT(vdev), vbasedev, ++ &vdev->ers[i].region, i, name); ++ qemu_log("%s flags 0x%x size %zu fd_offset 0x%lx " ++ "nr_mmaps %u mmaps %p\n", name, ++ vdev->ers[i].region.flags, ++ vdev->ers[i].region.size, ++ vdev->ers[i].region.fd_offset, ++ vdev->ers[i].region.nr_mmaps, ++ vdev->ers[i].region.mmaps); ++ g_free(name); ++ ++ if (ret) { ++ error_setg_errno(errp, -ret, "failed to get region %d info", i); ++ return; ++ } ++ ++ QLIST_INIT(&vdev->ers[i].quirks); ++ } ++ ret = vfio_get_region_info(vbasedev, VFIO_UB_CONFIG_REGION_INDEX, ®_info); ++ if (ret) { ++ error_setg_errno(errp, -ret, "failed to get ub config info"); ++ return; ++ } ++ vdev->config_offset = reg_info->offset; ++} ++ ++static void vfio_ers_prepare(VFIOUBDevice *vdev, uint8_t nr) ++{ ++ VFIOERS *ers = &vdev->ers[nr]; ++ ++ ers->size = ers->region.size; ++} ++ ++static void vfio_ers_register(VFIOUBDevice *vdev, uint8_t nr) ++{ ++ VFIOERS *ers = &vdev->ers[nr]; ++ char *name; ++ ++ if (!ers->size) { ++ return; ++ } ++ ++ ers->mr = g_new0(MemoryRegion, 1); ++ name = g_strdup_printf("%s-ERS%u", vdev->vbasedev.name, nr); ++ memory_region_init_io(ers->mr, OBJECT(vdev), NULL, NULL, name, ers->size); ++ g_free(name); ++ ++ if (ers->region.size) { ++ memory_region_add_subregion(ers->mr, 0, ers->region.mem); ++ ++ if (vfio_region_mmap(&ers->region)) { ++ error_report("Failed to mmap %s ERS%u. Performance may be slow", ++ vdev->vbasedev.name, nr); ++ } ++ } ++ for (int i = 0; i < ers->region.nr_mmaps; i++) { ++ qemu_log("mmaps[%d].mmap %p fd_offset 0x%lx offset 0x%lx size %zu mem.name %s\n", ++ i, ers->region.mmaps[i].mmap, ++ ers->region.fd_offset, ++ ers->region.mmaps[i].offset, ++ ers->region.mmaps[i].size, ++ ers->region.mmaps[i].mem.name); ++ } ++ ub_register_ers(&vdev->udev, nr, ers->mr); ++} ++ ++static int vfio_usi_setup(VFIOUBDevice *vdev, Error **errp) ++{ ++ usi_init(&vdev->udev, vdev->usi->vec_table_num, vdev->usi->addr_table_num, ++ vdev->usi->vec_table_start_addr, vdev->usi->addr_table_start_addr, ++ vdev->usi->pend_table_start_addr, vdev->ers[VFIO_UB_REGION0_INDEX].mr); ++ return 0; ++} ++ ++static int vfio_add_capabilities(VFIOUBDevice *vdev, Error **errp) ++{ ++ int ret; ++ ++ ret = vfio_usi_setup(vdev, errp); ++ ++ return ret; ++} ++static void vfio_ers_quirk_setup(VFIOUBDevice *vdev, uint8_t nr) ++{ ++ /* Currently, no processing is required. 
*/ ++} ++ ++static void vfio_ub_fixup_usi_region(VFIOUBDevice *vdev, Error **errp) ++{ ++ uint64_t vec_table_start, vec_table_end; ++ uint64_t addr_table_start, addr_table_end; ++ VFIOUSIInfo *usi = vdev->usi; ++ VFIORegion *region = &vdev->ers[VFIO_UB_REGION0_INDEX].region; ++ int i; ++ ++ if (region->nr_mmaps != 1 || region->mmaps[0].offset || ++ region->size != region->mmaps[0].size) { ++ error_setg(errp, "vfio ub device(%s) fixup usi region failed: region.nr_mmaps(%u), " ++ "region->mmaps[0].offset(%lu), region->mmaps[0].size(%ld), region.size(%ld).", ++ vdev->vbasedev.name, region->nr_mmaps, region->mmaps[0].offset, ++ region->mmaps[0].size, region->size); ++ return; ++ } ++ ++ /* usi table start and end aligned to host page size */ ++ vec_table_start = usi->vec_table_start_addr & qemu_real_host_page_mask(); ++ vec_table_end = vec_table_start + usi->vec_table_num * USI_VEC_TABLE_ENTRY_SIZE; ++ vec_table_end = REAL_HOST_PAGE_ALIGN(vec_table_end); ++ addr_table_start = usi->addr_table_start_addr & qemu_real_host_page_mask(); ++ addr_table_end = addr_table_start + usi->addr_table_num * USI_ADDR_TABLE_ENTRY_SIZE; ++ addr_table_end = REAL_HOST_PAGE_ALIGN(addr_table_end); ++ ++ qemu_log("vfio ub device(%s) after host page aligned usi info: " ++ "vec_table_start(0x%lx) vec_table_end(0x%lx) " ++ "addr_table_start(0x%lx) addr_table_end(0x%lx).\n", ++ vdev->vbasedev.name, vec_table_start, vec_table_end, ++ addr_table_start, addr_table_end); ++ ++ /* vec table in ERO offset is 0 */ ++ if (!vec_table_start) { ++ if (vec_table_end >= addr_table_start) { ++ /* ER0 ++ * ---------------------------------- ++ * | | | ++ * | vec_table | addr_table | ++ * | | | ++ * ---------------------------------- ++ */ ++ if (addr_table_end >= region->size) { ++ region->nr_mmaps = 0; ++ g_free(region->mmaps); ++ region->mmaps = NULL; ++ goto complete; ++ } ++ ++ /* ER0 ++ * --------------------------------------------------- ++ * | | | | ++ * | vec_table | addr_table | mmap[0] | ++ * | | | | ++ * --------------------------------------------------- ++ */ ++ region->mmaps[0].offset = addr_table_end; ++ region->mmaps[0].size = region->size - addr_table_end; ++ goto complete; ++ } ++ ++ /* ER0 ++ * -------------------------------------------- ++ * | | | | ++ * | vec_table | mmap[0] | addr_table | ++ * | | | | ++ * -------------------------------------------- ++ */ ++ if (addr_table_end >= region->size) { ++ region->mmaps[0].offset = vec_table_end; ++ region->mmaps[0].size = addr_table_start - vec_table_end; ++ goto complete; ++ } ++ ++ /* ER0 ++ * ------------------------------------------------------- ++ * | | | | | ++ * | vec_table | mmap[0] | addr_table | mmap[1] | ++ * | | | | | ++ * ------------------------------------------------------- ++ */ ++ region->nr_mmaps = 2; ++ region->mmaps = g_renew(VFIOMmap, region->mmaps, 2); ++ memcpy(®ion->mmaps[1], ®ion->mmaps[0], sizeof(VFIOMmap)); ++ region->mmaps[0].size = addr_table_start - vec_table_end; ++ region->mmaps[0].offset = vec_table_end; ++ region->mmaps[1].size = region->size - addr_table_end; ++ region->mmaps[1].offset = addr_table_end; ++ goto complete; ++ } ++ ++ /* the following case, vec_table in FER0 offset is not 0 */ ++ if (vec_table_end >= addr_table_start) { ++ /* FER0 ++ * ------------------------------------------ ++ * | | | | ++ * | mmap[0] | vec_table | addr_table | ++ * | | | | ++ * ------------------------------------------ ++ */ ++ if (addr_table_end >= region->size) { ++ region->mmaps[0].size = vec_table_start; ++ region->mmaps[0].offset = 0; ++ 
goto complete; ++ } ++ ++ /* ER0 ++ * ------------------------------------------------------ ++ * | | | | | ++ * | mmap[0] | vec_table | addr_table | mmap[1] | ++ * | | | | | ++ * ------------------------------------------------------ ++ * */ ++ region->nr_mmaps = 2; ++ region->mmaps = g_renew(VFIOMmap, region->mmaps, 2); ++ memcpy(®ion->mmaps[1], ®ion->mmaps[0], sizeof(VFIOMmap)); ++ region->mmaps[0].size = vec_table_start; ++ region->mmaps[0].offset = 0; ++ region->mmaps[1].size = region->size - addr_table_end; ++ region->mmaps[1].offset = addr_table_end; ++ goto complete; ++ } ++ ++ /* ER0 ++ * ------------------------------------------------------ ++ * | | | | | ++ * | mmap[0] | vec_table | mmap[1] | addr_table | ++ * | | | | | ++ * ------------------------------------------------------ ++ * */ ++ if (addr_table_end >= region->size) { ++ region->nr_mmaps = 2; ++ region->mmaps = g_renew(VFIOMmap, region->mmaps, 2); ++ memcpy(®ion->mmaps[1], ®ion->mmaps[0], sizeof(VFIOMmap)); ++ region->mmaps[0].size = vec_table_start; ++ region->mmaps[0].offset = 0; ++ region->mmaps[1].size = addr_table_start - vec_table_end; ++ region->mmaps[1].offset = vec_table_end; ++ goto complete; ++ } ++ ++ /* ER0 ++ * ------------------------------------------------------------------- ++ * | | | | | | ++ * | mmap[0] | vec_table | mmap[1] | addr_table | mmap[2] | ++ * | | | | | | ++ * ------------------------------------------------------------------- ++ * */ ++ region->nr_mmaps = 3; ++ region->mmaps = g_renew(VFIOMmap, region->mmaps, 3); ++ memcpy(®ion->mmaps[1], ®ion->mmaps[0], sizeof(VFIOMmap)); ++ memcpy(®ion->mmaps[2], ®ion->mmaps[0], sizeof(VFIOMmap)); ++ region->mmaps[0].size = vec_table_start; ++ region->mmaps[0].offset = 0; ++ region->mmaps[1].size = addr_table_start - vec_table_end; ++ region->mmaps[1].offset = vec_table_end; ++ region->mmaps[2].size = region->size - addr_table_end; ++ region->mmaps[2].offset = addr_table_end; ++ ++complete: ++ qemu_log("vfio ub device(%s) region after fix nr_mmaps is %u.\n", ++ vdev->vbasedev.name, region->nr_mmaps); ++ for (i = 0; i < region->nr_mmaps; i++) { ++ qemu_log("region[%d].mmap: size(0x%lx), offset(0x%lx)\n", ++ i, region->mmaps[i].size, region->mmaps[i].offset); ++ } ++} ++ ++static void vfio_vector_init(VFIOUBDevice *vdev, uint32_t nr) ++{ ++ VFIOUSIVector *vector = &vdev->usi_vectors[nr]; ++ ++ vector->vdev = vdev; ++ vector->virq = -1; ++ if (event_notifier_init(&vector->interrupt, 0)) { ++ error_report("vfio: Error: event_notifier_init failed"); ++ } ++ vector->use = true; ++} ++ ++static void vfio_update_kvm_usi_virq(VFIOUSIVector *vector, USIMessage msg, UBDevice *udev) ++{ ++ KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); ++ ++ qemu_log("udev(%s %s) virq(%u) start update kvm usi virq\n", ++ udev->name, udev->qdev.id, vector->virq); ++ kvm_irqchip_update_usi_route(&c, vector->virq, msg, udev); ++ kvm_irqchip_commit_routes(kvm_state); ++} ++ ++static void vfio_add_kvm_usi_virq(VFIOUBDevice *vdev, VFIOUSIVector *vector, uint16_t nr) ++{ ++ UBDevice *udev = &vdev->udev; ++ ++ qemu_log("ub device(%s %s) vector(%u) start add usi route.\n", ++ udev->name, udev->qdev.id, nr); ++ vector->virq = kvm_irqchip_add_usi_route(&vfio_route_change, ++ usi_get_message(udev, nr), ++ ub_interrupt_id(udev), ++ udev); ++} ++ ++static void vfio_connect_kvm_usi_virq(VFIOUBDevice *vdev, VFIOUSIVector *vector, ++ const char *name, uint16_t nr) ++{ ++ if (vector->virq < 0) { ++ qemu_log("unexpect vector virq %d < 0.\n", vector->virq); ++ return; ++ } ++ ++ if 
(event_notifier_init(&vector->kvm_interrupt, 0)) { ++ qemu_log("vector kvm_interupt notifier init failed.\n"); ++ goto fail_notifier; ++ } ++ ++ if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, ++ NULL, vector->virq) < 0) { ++ qemu_log("vfio ub device(%s) failed to add irqfd notifiers gsi.\n", vdev->vbasedev.name); ++ goto fail_kvm; ++ } ++ ++ qemu_log("vfio ub device(%s) connect kvm success.\n", vdev->vbasedev.name); ++ return; ++ ++fail_kvm: ++ event_notifier_cleanup(&vector->kvm_interrupt); ++fail_notifier: ++ kvm_irqchip_release_virq(kvm_state, vector->virq); ++ vector->virq = -1; ++} ++ ++static int vfio_enable_vectors(VFIOUBDevice *vdev) ++{ ++ struct vfio_irq_set *irq_set; ++ int i, argsz, ret; ++ int32_t *fds; ++ ++ argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); ++ irq_set = g_malloc0(argsz); ++ irq_set->argsz = argsz; ++ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; ++ irq_set->index = VFIO_UB_MSIX_IRQ_INDEX; ++ irq_set->start = 0; ++ irq_set->count = vdev->nr_vectors; ++ fds = (int32_t *)&irq_set->data; ++ ++ for (i = 0; i < vdev->nr_vectors; i++) { ++ int fd = -1; ++ ++ if (vdev->usi_vectors[i].use) { ++ fd = event_notifier_get_fd(&vdev->usi_vectors[i].kvm_interrupt); ++ } ++ ++ fds[i] = fd; ++ } ++ ++ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); ++ g_free(irq_set); ++ ++ return ret; ++} ++ ++static int vfio_usi_vector_do_use(UBDevice *udev, uint16_t nr, USIMessage *msg, ++ IOHandler *handler) ++{ ++ VFIOUBDevice *vdev = VFIO_UB(udev); ++ VFIOUSIVector *vector = NULL; ++ int32_t fd; ++ int ret; ++ Error *err = NULL; ++ ++ vector = &vdev->usi_vectors[nr]; ++ if (!vector->use) { ++ vfio_vector_init(vdev, nr); ++ } ++ ++ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), ++ handler, NULL, vector); ++ ++ if (vector->virq >= 0) { ++ if (!msg) { ++ qemu_log("vfio_remove_kvm_usi_virq %u\n", vector->virq); ++ } else { ++ vfio_update_kvm_usi_virq(vector, *msg, udev); ++ } ++ } else { ++ if (msg) { ++ vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state); ++ vfio_add_kvm_usi_virq(vdev, vector, nr); ++ kvm_irqchip_commit_route_changes(&vfio_route_change); ++ vfio_connect_kvm_usi_virq(vdev, vector, NULL, nr); ++ } ++ } ++ ++ if (vdev->nr_vectors < nr + 1) { ++ vdev->nr_vectors = nr + 1; ++ vfio_disable_irqindex(&vdev->vbasedev, VFIO_UB_MSIX_IRQ_INDEX); ++ ret = vfio_enable_vectors(vdev); ++ if (ret < 0) { ++ error_report("vfio: failed to enable vectors, %d", ret); ++ } ++ } else { ++ fd = event_notifier_get_fd(&vector->kvm_interrupt); ++ ret = vfio_set_irq_signaling(&vdev->vbasedev, VFIO_UB_MSIX_IRQ_INDEX, nr, ++ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err); ++ if (ret) { ++ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); ++ } ++ } ++ ++ return 0; ++} ++ ++static void vfio_usi_vector_release(UBDevice *udev, uint16_t nr) ++{ ++ VFIOUBDevice *vdev = VFIO_UB(udev); ++ VFIOUSIVector *vector = &vdev->usi_vectors[nr]; ++ ++ if (vector->virq > 0) { ++ int32_t fd = event_notifier_get_fd(&vector->interrupt); ++ Error *err = NULL; ++ ++ qemu_log("udev(%s %s) start update fd to qemu.\n", ++ udev->name, udev->qdev.id); ++ if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_UB_MSIX_IRQ_INDEX, nr, ++ VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { ++ error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); ++ return; ++ } ++ ++ qemu_log("udev(%s %s) update fd to qemu success.\n", ++ udev->name, udev->qdev.id); ++ } ++} ++ ++static void vfio_usi_interrupt(void *opaque) ++{ ++ VFIOUSIVector 
*vector = opaque; ++ VFIOUBDevice *vdev = vector->vdev; ++ int nr = vector - vdev->usi_vectors; ++ ++ if (!event_notifier_test_and_clear(&vector->interrupt)) { ++ return; ++ } ++ ++ if (usi_is_masked(&vdev->udev, nr)) { ++ /* process for memory region pba */ ++ } ++ ++ usi_notify(&vdev->udev, nr); ++} ++ ++static int vfio_usi_vector_use(UBDevice *udev, uint16_t nr, USIMessage msg) ++{ ++ return vfio_usi_vector_do_use(udev, nr, &msg, vfio_usi_interrupt); ++} ++ ++static void vfio_usi_early_setup(VFIOUBDevice *vdev, Error **errp) ++{ ++ int fd = vdev->vbasedev.fd; ++ uint64_t offset = vdev->config_offset; ++ uint16_t vec_table_num; ++ uint16_t addr_table_num; ++ uint64_t vec_table_start_addr; ++ uint64_t addr_table_start_addr; ++ uint64_t pend_table_start_addr; ++ VFIOUSIInfo *usi = NULL; ++ ++ if (pread(fd, &vec_table_num, sizeof(vec_table_num), ++ offset + UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_VEC_OFFSET) != sizeof(vec_table_num)) { ++ error_setg_errno(errp, errno, "failed to read NUMOF_INT_VEC"); ++ return; ++ } ++ ++ if (pread(fd, &addr_table_num, sizeof(addr_table_num), ++ offset + UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_ADDR_OFFSET) != sizeof(addr_table_num)) { ++ error_setg_errno(errp, errno, "failed to read NUMOF_INT_ADDR"); ++ return; ++ } ++ ++ if (pread(fd, &vec_table_start_addr, sizeof(vec_table_start_addr), ++ offset + UB_CFG1_CAP4_INT_TYPE2_INT_VEC_TAB_OFFSET) != sizeof(vec_table_start_addr)) { ++ error_setg_errno(errp, errno, "failed to read INT_VEC_TAB_START_ADDR"); ++ return; ++ } ++ ++ if (pread(fd, &addr_table_start_addr, sizeof(addr_table_start_addr), ++ offset + UB_CFG1_CAP4_INT_TYPE2_INT_ADDR_TAB_OFFSET) != sizeof(addr_table_start_addr)) { ++ error_setg_errno(errp, errno, "failed to read INT_ADDR_TAB_START_ADDR"); ++ return; ++ } ++ ++ if (pread(fd, &pend_table_start_addr, sizeof(pend_table_start_addr), ++ offset + UB_CFG1_CAP4_INT_TYPE2_INT_PENDING_TAB_OFFSET) != sizeof(pend_table_start_addr)) { ++ error_setg_errno(errp, errno, "failed to read INT_PENDING_TAB_START_ADDR"); ++ return; ++ } ++ ++ vec_table_num = le16_to_cpu(vec_table_num); ++ addr_table_num = le16_to_cpu(addr_table_num); ++ vec_table_start_addr = le64_to_cpu(vec_table_start_addr); ++ addr_table_start_addr = le64_to_cpu(addr_table_start_addr); ++ pend_table_start_addr = le64_to_cpu(pend_table_start_addr); ++ ++ usi = g_malloc0(sizeof(VFIOUSIInfo)); ++ /* according to UB SPEC, vec_table_num&addr_table_num is base 0, so this value need +1 */ ++ usi->vec_table_num = vec_table_num + 1; ++ usi->addr_table_num = addr_table_num + 1; ++ usi->vec_table_start_addr = vec_table_start_addr; ++ usi->addr_table_start_addr = addr_table_start_addr; ++ usi->pend_table_start_addr = pend_table_start_addr; ++ ++ qemu_log("vfio ub device(%s %s) usi info: vec_table_num(%u), addr_table_num(%u), " ++ "vec_table_start_addr(0x%lx), addr_table_start_addr(0x%lx), " ++ "pend_table_start_addr(0x%lx).\n", ++ vdev->vbasedev.name, vdev->udev.qdev.id, usi->vec_table_num, ++ usi->addr_table_num, usi->vec_table_start_addr, usi->addr_table_start_addr, ++ usi->pend_table_start_addr); ++ ++ vdev->usi = usi; ++ vfio_ub_fixup_usi_region(vdev, errp); ++} ++ ++static void vfio_ers_exit(VFIOUBDevice *vdev) ++{ ++ /* do nothing now */ ++} ++ ++static void vfio_copy_config_space_slice(VFIOUBDevice *vdev, size_t offset, size_t read_size) ++{ ++ int ret = 0; ++ uint64_t emulated_offset; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, false); ++ if (emulated_offset == UINT64_MAX) { ++ qemu_log("vfio copy cfg space slice out of emulated cfg range, " 
++ "offset is 0x%lx\n", offset); ++ return; ++ } ++ ++ ret = pread(vdev->vbasedev.fd, vdev->udev.config + emulated_offset, ++ read_size, vdev->config_offset + offset); ++ if (ret < read_size) { ++ qemu_log("failed to read device config space, ret: %d, offset: %lu, read_size: %lu\n", ++ ret, offset, read_size); ++ } ++} ++ ++static void vfio_copy_config_space_caps(VFIOUBDevice *vdev, uint8_t *bit_map, size_t base_off) ++{ ++ size_t bit_idx = 1; // bit0 in bit_map is invalid ++ size_t offset, rsize; ++ unsigned long *addr = (unsigned long *)bit_map; ++ ++ for_each_set_bit_from(bit_idx, addr, BITS_PER_CAP_BIT_MAP) { ++ offset = base_off + UB_SLICE_SZ * (bit_idx - 1); ++ rsize = UB_SLICE_SZ; ++ vfio_copy_config_space_slice(vdev, offset, rsize); ++ } ++} ++ ++static void vfio_copy_config_space(VFIOUBDevice *vdev) ++{ ++ size_t offset; ++ size_t rsize; ++ uint64_t emulated_offset; ++ UbCfg0Basic *cfg0_basic; ++ UbCfg1Basic *cfg1_basic; ++ ++ /* copy cfg0 */ ++ offset = UB_CFG0_BASIC_START; ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ rsize = UB_SLICE_SZ; ++ vfio_copy_config_space_slice(vdev, offset, rsize); // cfg0 basic ++ cfg0_basic = (UbCfg0Basic *)(vdev->udev.config + emulated_offset); ++ vfio_copy_config_space_caps(vdev, cfg0_basic->cap_bitmap, ++ UB_CFG0_BASIC_START + UB_SLICE_SZ); // cfg0 caps ++ ++ /* copy cfg1 */ ++ offset = UB_CFG1_BASIC_START; ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ vfio_copy_config_space_slice(vdev, offset, rsize); // cfg1 basic ++ cfg1_basic = (UbCfg1Basic *)(vdev->udev.config + emulated_offset); ++ vfio_copy_config_space_caps(vdev, cfg1_basic->cap_bitmap, ++ UB_CFG1_BASIC_START + UB_SLICE_SZ); // cfg1 caps ++} ++ ++static void vfio_cfg1_idev_ubba_init(VFIOUBDevice *vdev, Error **errp) ++{ ++ int i; ++ uint64_t emulated_offset; ++ hwaddr addr; ++ UbCfg1Basic *cfg1_basic = NULL; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(vdev->udev.config + emulated_offset); ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ /* if config1:feat.ers0s is 0, no need init ubba? 
*/ ++ addr = ub_idev_ers_alloc_address_space(cfg1_basic->ers_space_size[i], ++ cfg1_basic->sys_pgs); ++ if (addr == UINT64_MAX) { ++ error_setg(errp, "failed to alloc address space for idev."); ++ return; ++ } ++ ++ qemu_log("vfio-ub idev(%s) ers[%d] alloc ubba: 0x%lx," ++ "host ubba: 0x%lx.\n", ++ vdev->vbasedev.name, i, addr, cfg1_basic->ers_ubba[i]); ++ cfg1_basic->ers_ubba[i] = addr; ++ } ++} ++ ++static void vfio_cfg1_ubba_init(VFIOUBDevice *vdev, Error **errp) ++{ ++ uint64_t emulated_offset; ++ UbCfg1Basic *cfg1_basic, *emulated_cfg1_basic; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(vdev->udev.config + emulated_offset); ++ emulated_cfg1_basic = (UbCfg1Basic *)(vdev->emulated_config_bits + emulated_offset); ++ memset(&emulated_cfg1_basic->ers_ubba, 0xff, ++ sizeof(emulated_cfg1_basic->ers_ubba)); ++ ++ if (vdev->udev.dev_type == UB_TYPE_DEVICE) { ++ /* clear UBBA from vfio, the value of UB_TYPE_DEVICE should come from guestOS */ ++ memset(&cfg1_basic->ers_ubba, 0, sizeof(cfg1_basic->ers_ubba)); ++ } else if (vdev->udev.dev_type == UB_TYPE_IDEVICE) { ++ /* init ubba for idev */ ++ vfio_cfg1_idev_ubba_init(vdev, errp); ++ } ++} ++ ++static void vfio_cfg1_feature_init(VFIOUBDevice *vdev) ++{ ++ UbCfg1Basic *cfg1_basic, *emulated_cfg1_basic; ++ Cfg1SupportFeature *feat, *emulated_feat; ++ uint64_t emulated_offset; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(vdev->udev.config + emulated_offset); ++ emulated_cfg1_basic = (UbCfg1Basic *)(vdev->emulated_config_bits + emulated_offset); ++ feat = &cfg1_basic->support_feature; ++ emulated_feat = &emulated_cfg1_basic->support_feature; ++ feat->bits.ubbas = 1; /* UBBA must exist in VM for mmio mmap */ ++ emulated_feat->bits.ubbas = 1; ++} ++ ++static void vfio_cfg1_init(VFIOUBDevice *vdev, Error **errp) ++{ ++ uint64_t emulated_offset; ++ UbCfg1IntType2Cap *emulate_cfg1_int_cap = NULL; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true); ++ emulate_cfg1_int_cap = (UbCfg1IntType2Cap *)(vdev->emulated_config_bits + emulated_offset); ++ emulate_cfg1_int_cap->interrupt_id = ~0U; ++ emulate_cfg1_int_cap->interrupt_enable = ~0; ++ emulate_cfg1_int_cap->interrupt_mask = ~0; ++ vfio_cfg1_feature_init(vdev); ++ vfio_cfg1_ubba_init(vdev, errp); ++} ++ ++static void vfio_cfg0_init(VFIOUBDevice *vdev) ++{ ++ ConfigPortBasic *emulated_port_basic; ++ UbCfg0Basic *cfg0_basic, *emulated_cfg0_basic; ++ uint16_t port_num, port_idx; ++ size_t offset; ++ uint64_t emulated_offset; ++ ++ /* emulated feilds in cfg0 basic */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ cfg0_basic = (UbCfg0Basic *)(vdev->udev.config + emulated_offset); ++ emulated_cfg0_basic = (UbCfg0Basic *)(vdev->emulated_config_bits + emulated_offset); ++ memset(&emulated_cfg0_basic->total_num_of_ue, 0xff, ++ sizeof(emulated_cfg0_basic->total_num_of_ue)); ++ cfg0_basic->total_num_of_ue = 1; // Only FE0 is supported in VM ++ memset(&emulated_cfg0_basic->total_num_of_port, 0xff, ++ sizeof(emulated_cfg0_basic->total_num_of_port)); ++ cfg0_basic->total_num_of_port = vdev->udev.port.port_num & UINT16_MASK; ++ memset(&emulated_cfg0_basic->guid, 0xff, sizeof(UbGuid)); ++ memcpy(&cfg0_basic->guid, &vdev->udev.guid, sizeof(UbGuid)); ++ memset(&emulated_cfg0_basic->eid, 0xff, sizeof(UbEid)); ++ memset(&cfg0_basic->eid, 0, sizeof(UbEid)); ++ memset(&emulated_cfg0_basic->fm_eid, 0xff, 
sizeof(UbEid)); ++ memset(&cfg0_basic->fm_eid, 0, sizeof(UbEid)); ++ emulated_cfg0_basic->net_addr_info.primary_cna = 0xffffff; ++ cfg0_basic->net_addr_info.primary_cna = 0; ++ emulated_cfg0_basic->ueid_low = ~0UL; ++ emulated_cfg0_basic->ueid_high = ~0UL; ++ emulated_cfg0_basic->ucna = ~0; ++ cfg0_basic->support_feature.bits.route_table_supported = 0; ++ emulated_cfg0_basic->support_feature.bits.route_table_supported = ~0; ++ /* port info need emulate */ ++ port_num = cfg0_basic->total_num_of_port; ++ for (port_idx = 0; port_idx < port_num; ++port_idx) { ++ offset = UB_PORT_SLICE_START + port_idx * UB_PORT_SZ; ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, true); ++ emulated_port_basic = (ConfigPortBasic *)(vdev->emulated_config_bits + emulated_offset); ++ memset(emulated_port_basic, 0xff, sizeof(ConfigPortBasic)); ++ } ++} ++ ++static void vfio_emulate_bits_init(VFIOUBDevice *vdev, Error **errp) ++{ ++ if (!vdev) { ++ qemu_log("vfio init emulated bits fail, device null\n"); ++ return; ++ } ++ ++ vdev->config_size = ub_emulated_config_size(); ++ vdev->emulated_config_bits = g_malloc0(vdev->config_size); ++ vfio_cfg0_init(vdev); ++ vfio_cfg1_init(vdev, errp); ++} ++ ++static bool vfio_check_guid(VFIOUBDevice *vdev, Error **errp) ++{ ++ UBDevice *udev = &vdev->udev; ++ UbGuid *hguid = &vdev->host.guid; ++ ++ if (udev->guid.type != UB_GUID_TYPE_BUS_CONTROLLER && ++ udev->guid.type != UB_GUID_TYPE_IBUS_CONTROLLER) { ++ qemu_log("%s device type set error, expect: %u or %u, actual: %u\n", ++ udev->qdev.id, UB_GUID_TYPE_BUS_CONTROLLER, UB_GUID_TYPE_IBUS_CONTROLLER, udev->guid.type); ++ error_setg(errp, "%s device type set error, expect: %u or %u, actual: %u\n", ++ udev->qdev.id, UB_GUID_TYPE_BUS_CONTROLLER, UB_GUID_TYPE_IBUS_CONTROLLER, udev->guid.type); ++ return false; ++ } ++ ++ if (hguid->type != udev->guid.type || ++ hguid->vendor != udev->guid.vendor || ++ hguid->rsv != udev->guid.rsv || ++ hguid->device_id != udev->guid.device_id || ++ hguid->version != udev->guid.version) { ++ qemu_log("%s guid and host are not matching\n", udev->qdev.id); ++ error_setg(errp, "%s guid and host are not matching\n", udev->qdev.id); ++ return false; ++ } ++ return true; + } + + static void vfio_realize(UBDevice *udev, Error **errp) + { ++ VFIOUBDevice *vdev = VFIO_UB(udev); ++ VFIODevice *vbasedev = &vdev->vbasedev; ++ uint32_t id; ++ int ret; ++ uint8_t i; ++ uint32_t bus_instance_eid; ++ int bus_instance_type; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ Error *err = NULL; ++ ++ if (!vfio_check_guid(vdev, errp)) { ++ return; ++ } ++ if (!vdev->vbasedev.sysfsdev) { ++ if (!ub_guid_initialized(&vdev->host.guid)) { ++ error_setg(errp, "No provided host device"); ++ error_append_hint(errp, "Use -device vfio-ub,host="GUID_STR_EXAMPLE ++ "or -device vfio-ub,sysfsdev=PATH_TO_DEVICE\n"); ++ return; ++ } ++ /* Obtain the actual dev id of the device using the guid through sysfs. 
*/
++        id = sysfs_get_dev_number_by_guid(&vdev->host.guid);
++        if (id == UINT32_MAX) {
++            ub_device_get_str_from_guid(&vdev->host.guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1);
++            error_setg(errp, "device not found, guid: %s\n", guid_str);
++            return;
++        }
++        vdev->vbasedev.sysfsdev = g_strdup_printf("/sys/bus/ub/devices/%05x", id);
++
++        /* get the ub device bus instance eid & type */
++        bus_instance_eid = sysfs_get_ub_device_bus_instance_eid(vdev->vbasedev.sysfsdev);
++        bus_instance_type = sysfs_get_bus_instance_type_by_eid(bus_instance_eid);
++        if (!UBUS_INSTANCE_IS_DYNAMIC(bus_instance_type)) {
++            error_setg(errp, "ub device(%s) not bound to a dynamic bus instance\n", vdev->vbasedev.sysfsdev);
++            return;
++        }
++
++        udev->bus_instance_eid = bus_instance_eid;
++        ret = udev->bus_instance_verify(udev, errp);
++        if (ret) {
++            qemu_log("vfio ub device bus instance verify failed\n");
++            return;
++        }
++        udev->host_dev = true;
++    }
++    qemu_log("sysfsdev %s\n", vdev->vbasedev.sysfsdev);
++
++    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
++    if (ub_device_check_ummu_is_nested(udev) && !vdev->vbasedev.iommufd) {
++        error_setg(errp, "iommufd is required for nested ummu.");
++        goto error;
++    }
++
++    ret = vfio_attach_device(vdev->vbasedev.name, &vdev->vbasedev,
++                             ub_device_iommu_address_space(udev), errp);
++    if (ret) {
++        goto error;
++    }
++
++    vfio_populate_device(vdev, &err);
++    if (err) {
++        error_propagate(errp, err);
++        goto error;
++    }
++
++    /* Get a copy of config space */
++    vfio_copy_config_space(vdev);
++
++    /* Get dev type from cfg1Basic and guid */
++    udev->dev_type = ub_dev_get_type(udev);
++
++    /* vfio emulates a lot for us, but some bits need extra love */
++    vfio_emulate_bits_init(vdev, &err);
++    if (err) {
++        error_propagate(errp, err);
++        goto error;
++    }
++
++    for (i = 0; i < UB_NUM_REGIONS; i++) {
++        vfio_ers_prepare(vdev, i);
++    }
++
++    vfio_usi_early_setup(vdev, &err);
++    if (err) {
++        error_propagate(errp, err);
++        goto error;
++    }
++
++    for (i = 0; i < UB_NUM_REGIONS; i++) {
++        vfio_ers_register(vdev, i);
++    }
++
++    if (!vbasedev->mdev && vbasedev->iommufd) {
++        ret = ub_device_set_iommu_device(udev, vbasedev->hiod, errp);
++        if (ret) {
++            error_prepend(errp, "Failed to set iommu_device: ");
++            goto out_teardown;
++        }
++    }
++
++    ret = vfio_add_capabilities(vdev, errp);
++    if (ret) {
++        goto out_unset_idev;
++    }
++
++    for (i = 0; i < UB_NUM_REGIONS; i++) {
++        vfio_ers_quirk_setup(vdev, i);
++    }
++
++    return;
++out_unset_idev:
++    ub_device_unset_iommu_device(udev);
++out_teardown:
++    vfio_ers_exit(vdev);
++
++error:
++    error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
++}
++
++static void vfio_cfg1_idev_ubba_deinit(UBDevice *udev)
++{
++    int i;
++    UbCfg1Basic *cfg1_basic = NULL;
++    uint64_t emulated_offset;
++
++    emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true);
++    cfg1_basic = (UbCfg1Basic *)(udev->config + emulated_offset);
++    for (i = 0; i < UB_NUM_REGIONS; i++) {
++        ub_idev_ers_free_address_space(cfg1_basic->ers_ubba[i]);
++    }
++}
++
++static void vfio_teardown_usi(VFIOUBDevice *vdev)
++{
++    if (vdev->usi) {
++        usi_uninit(&vdev->udev, vdev->ers[VFIO_UB_REGION0_INDEX].mr);
++    }
+ }
+ 
+ static void vfio_exitfn(UBDevice *udev)
+ {
++    VFIOUBDevice *vdev = VFIO_UB(udev);
++
++    if (udev->dev_type == UB_TYPE_IDEVICE) {
++        vfio_cfg1_idev_ubba_deinit(udev);
++    }
++    vfio_disable_interrupts(vdev);
++
++    vfio_teardown_usi(vdev);
++    vfio_ers_exit(vdev);
++    ub_device_unset_iommu_device(udev);
++}
++
++static void 
vfio_disable_interrupts(VFIOUBDevice *vdev) ++{ ++ vfio_usi_disable(vdev); ++} ++ ++static void vfio_usi_enable(VFIOUBDevice *vdev) ++{ ++ vfio_disable_interrupts(vdev); ++ ++ vdev->usi_vectors = g_new0(VFIOUSIVector, vdev->usi->vec_table_num); ++ usi_set_vector_notifiers(&vdev->udev, vfio_usi_vector_use, vfio_usi_vector_release, NULL); ++ qemu_log("vfio ub device(%s %s) usi enable, vectors %d," ++ "vec_table_num %u, addr_table_num %u.\n", ++ vdev->udev.name, vdev->udev.qdev.id, vdev->nr_vectors, ++ vdev->usi->vec_table_num, vdev->usi->addr_table_num); ++} ++ ++static void vfio_remove_kvm_usi_virq(VFIOUSIVector *vector) ++{ ++ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, ++ vector->virq); ++ kvm_irqchip_release_virq(kvm_state, vector->virq); ++ vector->virq = -1; ++ event_notifier_cleanup(&vector->kvm_interrupt); ++} ++ ++static void vfio_usi_disable_common(VFIOUBDevice *vdev) ++{ ++ int i; ++ VFIOUSIVector *vector = NULL; ++ ++ for (i = 0; i < vdev->nr_vectors; i++) { ++ vector = &vdev->usi_vectors[i]; ++ if (!vector->use) { ++ continue; ++ } ++ ++ if (vector->virq >= 0) { ++ vfio_remove_kvm_usi_virq(vector); ++ } ++ ++ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), ++ NULL, NULL, NULL); ++ event_notifier_cleanup(&vector->interrupt); ++ } ++ ++ g_free(vdev->usi_vectors); ++ vdev->usi_vectors = NULL; ++ vdev->nr_vectors = 0; ++} ++ ++static void vfio_usi_disable(VFIOUBDevice *vdev) ++{ ++ usi_unset_vector_notifiers(&vdev->udev); ++ ++ if (vdev->nr_vectors) { ++ vfio_disable_irqindex(&vdev->vbasedev, VFIO_UB_MSIX_IRQ_INDEX); ++ } ++ qemu_log("vfio ub device(%s %s) usi disable, vectors %d.\n", ++ vdev->udev.name, vdev->udev.qdev.id, vdev->nr_vectors); ++ vfio_usi_disable_common(vdev); + } + + static void vfio_ub_read_config(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask) + { ++ VFIOUBDevice *vdev = VFIO_UB(dev); ++ uint32_t emu_bits = 0; ++ uint32_t emu_val = 0; ++ uint32_t phys_val = 0; ++ uint64_t emulated_offset; ++ ++ /* emu_bits bit_n: 0 means need read value from phy dev; 1 means read value from emulated config space */ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, false); ++ if (emulated_offset != UINT64_MAX) { ++ emu_bits = *(uint32_t *)(vdev->emulated_config_bits + emulated_offset); ++ if (emu_bits) { ++ ub_default_read_config(dev, offset, &emu_val, dw_mask); ++ } ++ } ++ ++ if (pread(vdev->vbasedev.fd, &phys_val, DWORD_SIZE, vdev->config_offset + offset) ++ != DWORD_SIZE) { ++ qemu_log("read value from phys dev: %s, addr: 0x%lx failed\n", vdev->vbasedev.name, offset); ++ *val = 0; ++ return; ++ } ++ phys_val &= dw_mask; ++ *val = (emu_val & emu_bits) | (phys_val & ~emu_bits); ++ trace_vfio_ub_read_config(offset, emu_val, phys_val, *val); ++} ++ ++static void vfio_sub_page_ers_update_mapping(UBDevice *udev, int ers_id) ++{ ++ /* do nothing now */ + } + + static void vfio_ub_write_config(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask) + { ++ VFIOUBDevice *vdev = VFIO_UB(dev); ++ uint32_t write_val = *val; ++ uint32_t phys_val; ++ int is_enabled, was_enabled, is_masked, was_masked; ++ ++ /* get old phys_val */ ++ if (pread(vdev->vbasedev.fd, &phys_val, DWORD_SIZE, ++ vdev->config_offset + offset) ++ != DWORD_SIZE) { ++ qemu_log("read value from phys dev: %s, addr: 0x%lx failed\n", ++ vdev->vbasedev.name, offset); ++ return; ++ } ++ trace_vfio_ub_write_config(offset, *val, dw_mask, phys_val); ++ ++ phys_val = (write_val & dw_mask) | (phys_val & ~dw_mask); ++ /* update value to phy config 
space */ ++ if (pwrite(vdev->vbasedev.fd, &phys_val, DWORD_SIZE, vdev->config_offset + offset) ++ != DWORD_SIZE) { ++ qemu_log("write value to phys dev: %s, addr: 0x%lx failed\n", ++ vdev->vbasedev.name, offset); ++ return; ++ } ++ ++ /* check whether the UBBA is updated by GuestOS */ ++ if (ranges_overlap(offset, DWORD_SIZE, ++ UB_CFG1_BASIC_START + offsetof(UbCfg1Basic, ers_ubba), ++ UB_NUM_REGIONS * sizeof(uint64_t))) { ++ uint64_t old_addr[UB_NUM_REGIONS]; ++ int ers_id; ++ ++ for (ers_id = 0; ers_id < UB_NUM_REGIONS; ers_id++) { ++ old_addr[ers_id] = dev->io_regions[ers_id].addr; ++ } ++ ++ /* update cfg */ ++ ub_default_write_config(dev, offset, val, dw_mask); ++ for (ers_id = 0; ers_id < UB_NUM_REGIONS; ers_id++) { ++ trace_vfio_ub_write_config_ioregion(ers_id, old_addr[ers_id], ++ dev->io_regions[ers_id].addr, ++ dev->io_regions[ers_id].size); ++ trace_vfio_ub_write_config_ers(ers_id, vdev->ers[ers_id].region.fd_offset, ++ vdev->ers[ers_id].region.size, ++ vdev->ers[ers_id].region.flags, ++ qemu_real_host_page_size()); ++ if (old_addr[ers_id] != dev->io_regions[ers_id].addr && ++ vdev->ers[ers_id].region.size > 0 && ++ vdev->ers[ers_id].region.size < qemu_real_host_page_size()) { ++ vfio_sub_page_ers_update_mapping(dev, ers_id); ++ } ++ } ++ ++ /* check whether the INT CAP Enable is update */ ++ } else if (ranges_overlap(offset, DWORD_SIZE, ++ UB_CFG1_CAP4_INT_TYPE2_ENABLE_OFFSET, DWORD_SIZE)) { ++ was_enabled = usi_enabled(dev); ++ ub_default_write_config(dev, offset, val, dw_mask); ++ is_enabled = usi_enabled(dev); ++ trace_vfio_ub_write_config_int_cap_en(*val, was_enabled, is_enabled); ++ if (is_enabled && !was_enabled) { ++ vfio_usi_enable(vdev); ++ } else if (was_enabled && !is_enabled) { ++ vfio_usi_disable(vdev); ++ } ++ ++ /* check whether the INT CAP FE Mask is update */ ++ } else if (ranges_overlap(offset, DWORD_SIZE, ++ UB_CFG1_CAP4_INT_TYPE2_MASK_OFFSET, DWORD_SIZE)) { ++ was_masked = usi_ue_is_masked(dev); ++ ub_default_write_config(dev, offset, val, dw_mask); ++ is_masked = usi_ue_is_masked(dev); ++ trace_vfio_ub_write_config_int_cap_mask(*val, was_masked, is_masked); ++ usi_handle_ue_mask_update(dev, was_masked); ++ } else { ++ /* sync phy config space and write emulated value to emulated config space */ ++ ub_default_write_config(dev, offset, val, dw_mask); ++ } + } + + static void vfio_ub_dev_class_init(ObjectClass *klass, void *data) +@@ -108,10 +1243,21 @@ static void vfio_ub_dev_class_init(ObjectClass *klass, void *data) + + static void vfio_instance_init(Object *obj) + { ++ VFIOUBDevice *vdev = VFIO_UB(obj); ++ VFIODevice *vbasedev = &vdev->vbasedev; ++ ++ memset(&vdev->host, 0, sizeof(vdev->host)); ++ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_UB, &vfio_ub_ops, ++ DEVICE(vdev), false); + } + + static void vfio_instance_finalize(Object *obj) + { ++ VFIOUBDevice *vdev = VFIO_UB(obj); ++ ++ g_free(vdev->emulated_config_bits); ++ g_free(vdev->usi); ++ g_free(vdev->usi_vectors); + } + + static const TypeInfo vfio_ub_dev_info = { +-- +2.33.0 + diff --git a/ub-realize-more-for-ubbus-and-realize-ub-ers-update.patch b/ub-realize-more-for-ubbus-and-realize-ub-ers-update.patch new file mode 100644 index 0000000000000000000000000000000000000000..21f86155567ad3daaf49dfe41b836ddce5c43ac0 --- /dev/null +++ b/ub-realize-more-for-ubbus-and-realize-ub-ers-update.patch @@ -0,0 +1,176 @@ +From 6271dab4f6386e128ae61423350cfa19edc0cb85 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 21:57:34 +0800 +Subject: [PATCH 6/7] ub: realize more for ubbus and realize ub 
ers update + +1. realize ubbus_dev_print and ubbus_get_dev_patch +2. support update mapping for ub device + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 4 ++ + hw/ub/ub.c | 113 ++++++++++++++++++++++++++++++++++++++++++++- + 2 files changed, 115 insertions(+), 2 deletions(-) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index 1fd621243b..d24c754de1 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -1,3 +1,7 @@ ++# ub.c ++ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx" ++ub_update_mappings_add(uint64_t new_addr) "commit region addr to 0x%lx" ++ + # ub_enum.c + enum_query_set_rsp_port_num(uint32_t num) "tlv total num ports is %u" + handle_enum_query_request(uint32_t hops, uint32_t opcode, uint32_t idx_start, uint32_t num_ports, uint32_t max_num_ports, char *guid) "hops: %u, opcode: %u, port_idx_start:%u, rsp_num_ports:%u, max_num_ports:%u guid %s" +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 2b797dcf60..21481b950c 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -33,17 +33,49 @@ + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" + #include "migration/vmstate.h" +- ++#include "exec/address-spaces.h" ++#include "monitor/monitor.h" ++#include "trace.h" + + QLIST_HEAD(, BusControllerState) ub_bus_controllers; + + static void ubbus_dev_print(Monitor *mon, DeviceState *dev, int indent) + { ++ UBDevice *udev = (UBDevice *)dev; ++ uint64_t offset0 = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ uint64_t offset1 = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ UbCfg0Basic *cfg0 = (UbCfg0Basic *)(udev->config + offset0); ++ UbCfg1Basic *cfg1 = (UbCfg1Basic *)(udev->config + offset1); ++ UBIORegion *r; ++ int i; ++ ++ monitor_printf(mon, "%*sGUID:vendor 0x%x Class 0x%x Type 0x%x " ++ "DevId 0x%x Ver 0x%x SN 0x%lx\n", ++ indent, "", cfg0->guid.vendor, cfg1->class_code, ++ cfg0->guid.type, cfg0->guid.device_id, cfg0->guid.version, ++ (unsigned long)cfg0->guid.seq_num); ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ r = &udev->io_regions[i]; ++ if (!r->size) { ++ continue; ++ } ++ monitor_printf(mon, "%*sers %d: mem at 0x%"PRIx64 ++ " [0x%"PRIx64"]\n", ++ indent, "", i, ++ r->addr, r->addr + r->size - 1); ++ } + } + + static char *ubbus_get_dev_path(DeviceState *dev) + { +- return NULL; ++ UBDevice *udev = (UBDevice *)dev; ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ UbCfg0Basic *cfg0 = (UbCfg0Basic *)(udev->config + offset); ++ char *path = g_malloc(UB_DEV_GUID_STRING_LENGTH + 1); ++ ++ ub_device_get_str_from_guid(&cfg0->guid, path, UB_DEV_GUID_STRING_LENGTH + 1); ++ ++ return path; + } + + static char *ubbus_get_fw_dev_path(DeviceState *dev) +@@ -294,6 +326,72 @@ void ub_default_read_config(UBDevice *dev, uint64_t offset, + *val = read_data & dw_mask; + } + ++static uint64_t ub_er_address(UBDevice *dev, uint8_t ers, uint64_t size) ++{ ++ uint64_t new_addr, last_addr; ++ UbCfg1Basic *cfg1_basic; ++ uint64_t emulated_offset; ++ ++ if (ers > UB_NUM_REGIONS) { ++ qemu_log("invalid ers %u\n", ers); ++ return UB_ER_UNMAPPED; ++ } ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(dev->config + emulated_offset); ++ if (!cfg1_basic->dev_rs_access_en) { ++ return UB_ER_UNMAPPED; ++ } ++ ++ new_addr = cfg1_basic->ers_ubba[ers]; ++ new_addr &= ~(size -1); ++ last_addr = new_addr + size - 1; ++ /* NOTE: we do not support wrapping */ ++ if 
(last_addr <= new_addr || last_addr == UB_ER_UNMAPPED) { ++ return UB_ER_UNMAPPED; ++ } ++ ++ return new_addr; ++} ++ ++static void ub_update_mappings(UBDevice *dev) ++{ ++ UBIORegion *region; ++ uint64_t new_addr; ++ uint8_t i; ++ ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ region = &dev->io_regions[i]; ++ ++ /* this region isn't registered */ ++ if (!region->size) { ++ continue; ++ } ++ ++ new_addr = ub_er_address(dev, i, region->size); ++ trace_ub_update_mappings(i, region->size, region->addr, new_addr); ++ if (new_addr == UB_ER_UNMAPPED) { ++ continue; ++ } ++ ++ /* This ers isn't changed */ ++ if (new_addr == region->addr) { ++ continue; ++ } ++ ++ if (region->addr != UB_ER_UNMAPPED) { ++ memory_region_del_subregion(region->address_space, region->memory); ++ } ++ ++ region->addr = new_addr; ++ if (region->addr != UB_ER_UNMAPPED) { ++ trace_ub_update_mappings_add(region->addr); ++ memory_region_add_subregion_overlap(region->address_space, ++ region->addr, region->memory, 1); ++ } ++ } ++} ++ + void ub_default_write_config(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask) + { +@@ -314,6 +412,17 @@ void ub_default_write_config(UBDevice *dev, uint64_t offset, + dw_w1cmask = *(uint32_t *)(dev->w1cmask + emulated_offset) & dw_mask; + *dst_data = (*dst_data & ~dw_wmask) | (write_data & dw_wmask); + *dst_data &= ~(write_data & dw_w1cmask); ++ ++ if (ranges_overlap(offset, DWORD_SIZE, ++ UB_CFG1_BASIC_START + offsetof(UbCfg1Basic, ers_ubba), ++ UB_NUM_REGIONS * sizeof(uint64_t)) && write_data != UINT32_MAX) { ++ ub_update_mappings(dev); ++ } ++ ++ /* for idev update mapping */ ++ if (ranges_overlap(offset, DWORD_SIZE, UB_CFG1_DEV_RS_ACCESS_EN_OFFSET, DWORD_SIZE)) { ++ ub_update_mappings(dev); ++ } + } + + static UBDevice *do_ub_register_device(UBDevice *ub_dev, const char *name, Error **errp) +-- +2.33.0 + diff --git a/ub-realize-more-mcmd-process.patch b/ub-realize-more-mcmd-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..2552a8faec861da711603cdc7bd3d97e860d1eb2 --- /dev/null +++ b/ub-realize-more-mcmd-process.patch @@ -0,0 +1,148 @@ +From c7dfa82ab43a2f12a40d8e1ceeb3d63cc1306d8c Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 20:30:33 +0800 +Subject: [PATCH 4/6] ub: realize more mcmd process + +realize tct/tect cfgi mcmdq cmd + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 2 ++ + hw/ub/ub_ummu.c | 69 ++++++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 68 insertions(+), 3 deletions(-) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index 78182e2896..986dab9e48 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -6,6 +6,8 @@ mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s" + mcmdq_cmd_sync_handler(uint32_t mcmdq_idx, uint64_t usi_addr, uint32_t usi_data) "CMD_SYNC: mcmdq_idx(%u) usi_addr(0x%lx) usi_data(0x%x)" + mcmdq_cmd_cfgi_tect_handler(uint32_t mcmdq_idx, uint32_t tecte_tag) "CMD_CFGI_TECT: mcmdq_idx(%u) tecte_tag(%u)" + mcmdq_cmd_cfgi_tect_range_handler(uint32_t mcmdq_idx, uint32_t tecte_tag, uint32_t range) "CMD_CFGI_TECT_RANGE: mcmdq_idx(%u) tecte_tag(%u) range(%u)" ++mcmdq_cmd_cfgi_tct_handler(uint32_t mcmdq_idx, uint32_t tecte_tag) "CMD_CFGI_TCT: mcmdq_idx(%u) tecte_tag(%u)" ++mcmdq_cmd_cfgi_tct_all_handler(uint32_t mcmdq_idx) "CMD_CFGI_TCT_ALL: mcmdq_idx(%u)" + mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: 
mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index a55ab00e96..db5e6583f8 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -573,6 +573,61 @@ static void mcmdq_cmd_cfgi_tect_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t + ummu_invalidate_cache(u, cmd); + } + ++static void mcmdq_cmd_cfgi_tect_range_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ uint32_t tecte_tag = CMD_TECTE_TAG(cmd); ++ uint8_t range = CMD_TECTE_RANGE(cmd); ++ uint32_t mask; ++ int i; ++ UMMUTecteRange tecte_range = { .invalid_all = false, }; ++ ++ trace_mcmdq_cmd_cfgi_tect_range_handler(mcmdq_idx, tecte_tag, range); ++ ++ if (CMD_TECTE_RANGE_INVILID_ALL(range)) { ++ tecte_range.invalid_all = true; ++ } else { ++ mask = (1ULL << (range + 1)) - 1; ++ tecte_range.start = tecte_tag & ~mask; ++ tecte_range.end = tecte_range.start + mask; ++ } ++ ++ g_hash_table_foreach_remove(u->configs, ummu_invalid_tecte, &tecte_range); ++ ummu_invalidate_cache(u, cmd); ++ ++ if (tecte_range.invalid_all && u->tecte_tag_num > 0) { ++ for (i = u->tecte_tag_num - 1; i >= 0; i--) { ++ if (i >= UMMU_TECTE_TAG_MAX_NUM) { ++ continue; ++ } ++ ummu_config_tecte(u, u->tecte_tag_cache[i]); ++ } ++ u->tecte_tag_num = 0; ++ return; ++ } ++ ++ for (i = tecte_range.start; i <= tecte_range.end; i++) { ++ ummu_config_tecte(u, i); ++ } ++} ++ ++static void mcmdq_cmd_cfgi_tct_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ uint32_t tecte_tag = CMD_TECTE_TAG(cmd); ++ ++ trace_mcmdq_cmd_cfgi_tct_handler(mcmdq_idx, tecte_tag); ++ ++ ummu_invalid_single_tecte(u, tecte_tag); ++ ummu_invalidate_cache(u, cmd); ++} ++ ++static void mcmdq_cmd_cfgi_tct_all_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ trace_mcmdq_cmd_cfgi_tct_all_handler(mcmdq_idx); ++ ++ /* cfgi_tct & cfgi_tct_all process is the same */ ++ mcmdq_cmd_cfgi_tct_handler(u, cmd, mcmdq_idx); ++} ++ + static void ummu_viommu_invalidate_cache(IOMMUFDViommu *viommu, uint32_t type, UMMUMcmdqCmd *cmd) + { + int ret; +@@ -611,11 +666,13 @@ static void ummu_invalidate_cache(UMMUState *u, UMMUMcmdqCmd *cmd) + static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); ++ ummu_invalidate_cache(u, cmd); + } + + static void mcmdq_cmd_tlbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + trace_mcmdq_cmd_tlbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); ++ ummu_invalidate_cache(u, cmd); + } + + static void mcmdq_check_pa_continuity_fill_result(UMMUMcmdQueue *mcmdq, bool continuity) +@@ -686,9 +743,9 @@ static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcm + [CMD_STALL_RESUME] = NULL, + [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg, + [CMD_CFGI_TECT] = mcmdq_cmd_cfgi_tect_handler, +- [CMD_CFGI_TECT_RANGE] = NULL, +- [CMD_CFGI_TCT] = NULL, +- [CMD_CFGI_TCT_ALL] = NULL, ++ [CMD_CFGI_TECT_RANGE] = mcmdq_cmd_cfgi_tect_range_handler, ++ [CMD_CFGI_TCT] = mcmdq_cmd_cfgi_tct_handler, ++ [CMD_CFGI_TCT_ALL] = mcmdq_cmd_cfgi_tct_all_handler, + [CMD_CFGI_VMS_PIDM] = NULL, + [CMD_PLBI_OS_EID] = mcmdq_cmd_plbi_x_process, + [CMD_PLBI_OS_EIDTID] = mcmdq_cmd_plbi_x_process, +@@ -904,9 +961,11 @@ static void ummu_process_mapt_cmd(UMMUState *u, MAPTCmdqBase *base, MAPTCmd *cmd + { + uint32_t type = MAPT_UCMD_TYPE(cmd); + MAPTCmdCpl cpl; ++ 
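/* mcmdq command rebuilt from the MAPT request so the common ummu_invalidate_cache() path can be reused */ ++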
UMMUMcmdqCmd mcmd_cmd = { 0 }; + uint16_t tecte_tag; + uint32_t tid; + ++ mcmd_cmd.word[0] = CMD_PLBI_OS_EID; + /* default set cpl staus invalid */ + ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_INVALID, 0); + tecte_tag = ummu_mapt_cmdq_base_get_tecte_tag(base); +@@ -919,9 +978,13 @@ static void ummu_process_mapt_cmd(UMMUState *u, MAPTCmdqBase *base, MAPTCmd *cmd + break; + case MAPT_UCMD_TYPE_PLBI_USR_ALL: + qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_ALL.\n"); ++ ummu_mcmdq_construct_plbi_os_eidtid(&mcmd_cmd, tid, tecte_tag); ++ ummu_invalidate_cache(u, &mcmd_cmd); + break; + case MAPT_UCMD_TYPE_PLBI_USR_VA: + qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_VA.\n"); ++ ummu_plib_usr_va_to_pibi_os_va(cmd, &mcmd_cmd, tid, tecte_tag); ++ ummu_invalidate_cache(u, &mcmd_cmd); + break; + default: + qemu_log("unknown mapt cmd type: 0x%x\n", type); +-- +2.33.0 + diff --git a/ub-realize-ub-config-space-msg-process.patch b/ub-realize-ub-config-space-msg-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..998300610c561c57f864febad01edd5dc5557a6f --- /dev/null +++ b/ub-realize-ub-config-space-msg-process.patch @@ -0,0 +1,204 @@ +From 7f4c0f6ee632d3917eadb4ef5dbd931d16bd4e12 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 20:17:50 +0800 +Subject: [PATCH 2/7] ub: realize ub config space msg process + +process ub device config space read/write + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_config.c | 162 +++++++++++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub.h | 4 ++ + 2 files changed, 166 insertions(+) + +diff --git a/hw/ub/ub_config.c b/hw/ub/ub_config.c +index 25307cba19..48598a0230 100644 +--- a/hw/ub/ub_config.c ++++ b/hw/ub/ub_config.c +@@ -132,6 +132,168 @@ uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success) + return emulate_offset; + } + ++static uint32_t ub_dev_get_cna(UBDevice *dev) ++{ ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_NA_INFO_START, true); ++ ConfigNetAddrInfo *net_addr_info = (ConfigNetAddrInfo *)(dev->config + emulated_offset); ++ uint32_t dev_cna = net_addr_info->primary_cna; ++ return dev_cna; ++} ++ ++static UBDevice *ub_find_device_by_cna(UBBus *bus, uint32_t dcna) ++{ ++ UBDevice *dev; ++ ++ QLIST_FOREACH(dev, &bus->devices, node) { ++ if (ub_dev_get_cna(dev) == dcna) { ++ return dev; ++ } ++ } ++ ++ return NULL; ++} ++ ++static void ub_cfg_msg_fill_cq_rq(BusControllerState *s, HiMsgSqe *sqe, MsgPktHeader *header, ++ CfgMsgPkt *rsp_pkt) ++{ ++ HiMsgCqe cqe; ++ uint32_t pi; ++ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.type = MSG_RSP; ++ cqe.msg_code = UB_MSG_CODE_CFG; ++ cqe.sub_msg_code = header->msgetah.sub_msg_code; ++ rsp_pkt->header.msgetah.type = MSG_RSP; ++ rsp_pkt->header.msgetah.sub_msg_code = header->msgetah.sub_msg_code; ++ ++ rsp_pkt->header.nth.scna = header->nth.dcna; ++ rsp_pkt->header.nth.dcna = header->nth.scna; ++ rsp_pkt->header.deid = EID_GEN(header->seid_h, header->seid_l); ++ rsp_pkt->header.seid_h = EID_HIGH(header->deid); ++ rsp_pkt->header.seid_l = EID_LOW(header->deid); ++ cqe.msn = sqe->msn; ++ cqe.p_len = MSG_CFG_PKT_SIZE; ++ pi = fill_rq(s, rsp_pkt, sizeof(CfgMsgPkt)); ++ if (pi == UINT32_MAX) { ++ qemu_log("fill rq failed!\n"); ++ return; ++ } ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = pi; ++ (void)fill_cq(s, &cqe); ++} ++ ++static uint32_t get_dw_mask(uint8_t byte_enable) ++{ ++ uint32_t dw_mask = 0; ++ uint32_t bt_mask = 0xff; ++ ++ byte_enable &= 0x0f; // for dword, only lower four bits are valid ++ 
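/* turn each enabled byte-enable bit into a full 0xff byte lane of the dword mask */ ++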
while (byte_enable) { ++ if (byte_enable & 0x1) { ++ dw_mask |= bt_mask; ++ } ++ bt_mask <<= BITS_PER_BYTE; ++ byte_enable >>= 1; ++ } ++ return dw_mask; ++} ++ ++static void ub_cfg_rw(BusControllerState *s, HiMsgSqe *sqe, ++ MsgPktHeader *header) ++{ ++ CfgMsgPldReq *payload = (CfgMsgPldReq *)header->payload; ++ CfgMsgPkt rsp_pkt; ++ uint32_t local = sqe->local; ++ uint64_t cfg_offset = (uint64_t)payload->req_addr * DWORD_SIZE; ++ uint32_t entity = payload->entity_idx; ++ uint32_t dcna = header->nth.dcna; ++ UBDevice *ub_dev = NULL; ++ uint32_t dw_mask; ++ uint64_t emulated_offset; ++ ++ if (header->msgetah.plen != CFG_MSG_PLD_SIZE) { ++ qemu_log("invalid len %u, please check driver inside guestos\n", ++ header->msgetah.plen); ++ return; ++ } ++ memset(&rsp_pkt, 0, sizeof(CfgMsgPkt)); ++ memcpy(&rsp_pkt.header, header, sizeof(MsgPktHeader)); ++ ++ /* vm support only FE0(entity_idx = 0) */ ++ if (entity) { ++ qemu_log("vm support only FE0, entity idx: %u\n", entity); ++ rsp_pkt.header.msgetah.rsp_status = UB_MSG_RSP_REG_ATTR_MISMATCH; ++ goto fill_rq_cq; ++ } ++ /* ++ * TODO: Check whether dcna is the unique identifier of the device when the configuration space is read or written. ++ * local = 1: ubc or idev, dcna = 0: default to ubc ++ * local = 0: ub devices ++ */ ++ if (local && !dcna) { ++ ub_dev = UB_DEVICE(s->ubc_dev); ++ if (!ub_dev) { ++ qemu_log("ubc not config?\n"); ++ return; ++ } ++ } else { ++ ub_dev = ub_find_device_by_cna(s->bus, dcna); ++ if (!ub_dev) { ++ qemu_log("device not found. dcna %u\n", dcna); ++ return; ++ } ++ } ++ ++ rsp_pkt.header.msgetah.rsp_status = UB_MSG_RSP_SUCCESS; ++ if (cfg_offset >= ub_config_size()) { ++ rsp_pkt.header.msgetah.rsp_status = UB_MSG_RSP_INVALID_ADDR; ++ goto fill_rq_cq; ++ } ++ dw_mask = get_dw_mask(payload->byte_enable); ++ switch (header->msgetah.sub_msg_code) { ++ case UB_CFG0_READ: ++ case UB_CFG1_READ: ++ if (ub_dev->config_read) { ++ ub_dev->config_read(ub_dev, cfg_offset, &rsp_pkt.pld.rsp.read_data, dw_mask); ++ } else { ++ qemu_log("dev: %s read config func NULL\n", ub_dev->qdev.id); ++ } ++ break; ++ case UB_CFG0_WRITE: ++ case UB_CFG1_WRITE: ++ emulated_offset = ub_cfg_offset_to_emulated_offset(cfg_offset, false); ++ if (emulated_offset != UINT64_MAX && !*((uint32_t *)(&ub_dev->wmask[emulated_offset]))) { ++ rsp_pkt.header.msgetah.rsp_status = UB_MSG_RSP_REG_ATTR_MISMATCH; ++ qemu_log("register cannot be written.\n"); ++ goto fill_rq_cq; ++ } ++ if (ub_dev->config_write) { ++ ub_dev->config_write(ub_dev, cfg_offset, &payload->write_data, dw_mask); ++ } else { ++ qemu_log("dev: %s write config func NULL\n", ub_dev->qdev.id); ++ } ++ break; ++ default: ++ break; ++ } ++fill_rq_cq: ++ ub_cfg_msg_fill_cq_rq(s, sqe, header, &rsp_pkt); ++} ++ + void handle_msg_cfg(void *opaque, HiMsgSqe *sqe, void *payload) + { ++ BusControllerState *s = opaque; ++ MsgPktHeader *header = (MsgPktHeader *)payload; ++ MsgExtendedHeader *msgetah = &header->msgetah; ++ ++ if (msgetah->msg_code != UB_MSG_CODE_CFG || ++ msgetah->sub_msg_code >= UB_CFG_MAX_SUB_MSG_CODE) { ++ qemu_log("invalid msg code %u or sub msg code %u, " ++ "please check the driver inside guestos\n", ++ msgetah->msg_code, msgetah->sub_msg_code); ++ return; ++ } ++ ++ ub_cfg_rw(s, sqe, header); + } +\ No newline at end of file +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index 858824220c..b07cc36efd 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -228,5 +228,9 @@ void ub_default_write_config(UBDevice *dev, uint64_t offset, + uint32_t *val, uint32_t dw_mask); + 
UBDevice *ub_find_device_by_guid(UbGuid *guid); + int ub_dev_finally_setup(VirtMachineState *vms, Error **errp); ++static inline uint64_t ub_config_size(void) ++{ ++ return UB_DEV_CONFIG_SPACE_TOTAL_SIZE; ++} + UBDevice *ub_find_device_by_id(const char *id); + #endif +-- +2.33.0 + diff --git a/ub-realize-ub-fm-memory-region-ops.patch b/ub-realize-ub-fm-memory-region-ops.patch new file mode 100644 index 0000000000000000000000000000000000000000..6936627751b1a1d699dcdccfcd2c62a1a0ad80a3 --- /dev/null +++ b/ub-realize-ub-fm-memory-region-ops.patch @@ -0,0 +1,149 @@ +From 3e92cc3ff46785e1b9233f5f9d922757a655ceb1 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 19:01:56 +0800 +Subject: [PATCH 5/5] ub: realize ub fm memory region ops + +add ub fm memory region ops for read/write process + +Signed-off-by: caojinhuahw +--- + hw/ub/hisi/meson.build | 5 +++ + hw/ub/hisi/ub_fm.c | 70 ++++++++++++++++++++++++++++++++++++++ + hw/ub/ub_ubc.c | 5 ++- + include/hw/ub/hisi/ub_fm.h | 6 +++- + 4 files changed, 84 insertions(+), 2 deletions(-) + create mode 100644 hw/ub/hisi/ub_fm.c + +diff --git a/hw/ub/hisi/meson.build b/hw/ub/hisi/meson.build +index e69de29bb2..df07aae9e1 100644 +--- a/hw/ub/hisi/meson.build ++++ b/hw/ub/hisi/meson.build +@@ -0,0 +1,5 @@ ++ub_ss = ss.source_set() ++ub_ss.add(files( ++ 'ub_fm.c', ++)) ++system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) +\ No newline at end of file +diff --git a/hw/ub/hisi/ub_fm.c b/hw/ub/hisi/ub_fm.c +new file mode 100644 +index 0000000000..7b7625621a +--- /dev/null ++++ b/hw/ub/hisi/ub_fm.c +@@ -0,0 +1,70 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "hw/ub/hisi/ub_fm.h" ++#include "qemu/log.h" ++ ++uint64_t ub_fm_msgq_reg_read(void *opaque, hwaddr addr, unsigned len) ++{ ++ BusControllerState *s = opaque; ++ uint64_t val; ++ ++ switch (len) { ++ case BYTE_SIZE: ++ val = ub_get_byte(s->fm_msgq_reg + addr); ++ break; ++ case WORD_SIZE: ++ val = ub_get_word(s->fm_msgq_reg + addr); ++ break; ++ case DWORD_SIZE: ++ val = ub_get_long(s->fm_msgq_reg + addr); ++ break; ++ default: ++ qemu_log("invalid argument len 0x%x\n", len); ++ val = ~0x0; ++ break; ++ } ++ ++ qemu_log("ub_fm_msgq_reg_read addr 0x%lx len 0x%x val 0x%lx\n", ++ addr, len, val); ++ return val; ++} ++ ++void ub_fm_msgq_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned len) ++{ ++ BusControllerState *s = opaque; ++ ++ switch (len) { ++ case BYTE_SIZE: ++ ub_set_byte(s->fm_msgq_reg + addr, val); ++ break; ++ case WORD_SIZE: ++ ub_set_word(s->fm_msgq_reg + addr, val); ++ break; ++ case DWORD_SIZE: ++ ub_set_long(s->fm_msgq_reg + addr, val); ++ break; ++ default: ++ /* As length is under guest control, handle illegal values. 
*/ ++ qemu_log("invalid argument len 0x%x addr 0x%lx val 0x%lx\n", ++ len, addr, val); ++ return; ++ } ++ qemu_log("ub_fm_msgq_reg_write addr 0x%lx len 0x%x val 0x%lx\n", ++ addr, len, val); ++} +\ No newline at end of file +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index 1023ec9deb..d28ba2024f 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -29,6 +29,7 @@ + #include "hw/ub/ub_config.h" + #include "hw/ub/hisi/ubc.h" + #include "hw/ub/hisi/ub_mem.h" ++#include "hw/ub/hisi/ub_fm.h" + #include "migration/vmstate.h" + + static uint64_t ub_msgq_reg_read(void *opaque, hwaddr addr, unsigned len) +@@ -83,7 +84,9 @@ static const MemoryRegionOps ub_msgq_reg_ops = { + }; + + static const MemoryRegionOps ub_fm_msgq_reg_ops = { +- ++ .read = ub_fm_msgq_reg_read, ++ .write = ub_fm_msgq_reg_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, + }; + + static void ub_reg_alloc(DeviceState *dev) +diff --git a/include/hw/ub/hisi/ub_fm.h b/include/hw/ub/hisi/ub_fm.h +index bd606227a6..93255e47d8 100644 +--- a/include/hw/ub/hisi/ub_fm.h ++++ b/include/hw/ub/hisi/ub_fm.h +@@ -18,8 +18,12 @@ + #ifndef UB_HISI_FM_H + #define UB_HISI_FM_H + #include "hw/ub/hisi/ubc.h" ++#include "hw/qdev-core.h" ++#include "hw/ub/ub_common.h" + + #define FM_MSGQ_REG_OFFSET (UBC_MSGQ_REG_OFFSET + UBC_MSGQ_REG_SIZE) + #define FM_MSGQ_REG_SIZE 0x100000 /* 1MiB */ + +-#endif ++uint64_t ub_fm_msgq_reg_read(void *opaque, hwaddr addr, unsigned len); ++void ub_fm_msgq_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned len); ++#endif +\ No newline at end of file +-- +2.33.0 + diff --git a/ub-realize-ubbus-and-udev-reset-callbalk.patch b/ub-realize-ubbus-and-udev-reset-callbalk.patch new file mode 100644 index 0000000000000000000000000000000000000000..d907b0d0643469cf9044f610cb98b176a8a2329d --- /dev/null +++ b/ub-realize-ubbus-and-udev-reset-callbalk.patch @@ -0,0 +1,157 @@ +From 08109ad1a1cc8dde1fb3430f55de7254ef6fbc70 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 14 Nov 2025 15:30:32 +0800 +Subject: [PATCH 3/7] ub: realize ubbus and udev reset callbalk + +support ubbus and udev reset + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 105 insertions(+) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 45a2c84968..23e1279cf4 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -27,6 +27,7 @@ + #include "hw/ub/ub_config.h" + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_usi.h" + #include "hw/ub/ub_acpi.h" + #include "qemu/log.h" + #include "qapi/error.h" +@@ -39,6 +40,7 @@ + #include "trace.h" + + QLIST_HEAD(, BusControllerState) ub_bus_controllers; ++static void ub_update_mappings(UBDevice *dev); + + static void ubbus_dev_print(Monitor *mon, DeviceState *dev, int indent) + { +@@ -112,8 +114,93 @@ static void ub_bus_unrealize(BusState *qbus) + vmstate_unregister(NULL, &vmstate_ubbus, bus); + } + ++static void ub_dev_clear_cfg0(UBDevice *dev) ++{ ++ UbCfg0Basic *cfg0_basic; ++ uint64_t offset; ++ ++ /* emulated feilds in cfg0 basic */ ++ offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_START, true); ++ cfg0_basic = (UbCfg0Basic *)(dev->config + offset); ++ memset(&cfg0_basic->eid, 0, sizeof(cfg0_basic->eid)); ++ memset(&cfg0_basic->fm_eid, 0, sizeof(cfg0_basic->fm_eid)); ++ memset(&cfg0_basic->net_addr_info, 0, ++ sizeof(cfg0_basic->net_addr_info)); ++ cfg0_basic->upi = 0; ++ cfg0_basic->mtu_cfg = 0; ++ cfg0_basic->dev_rst = 0; ++ cfg0_basic->th_en = 0; ++ cfg0_basic->cc_en = 0; ++ cfg0_basic->ueid_low = 0; ++ 
cfg0_basic->ueid_high = 0; ++ cfg0_basic->ucna = 0; ++ cfg0_basic->fm_cna = 0; ++} ++ ++static void ub_dev_clear_cfg1(UBDevice *dev) ++{ ++ UbCfg1Basic *cfg1_basic; ++ uint64_t offset; ++ ++ offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(dev->config + offset); ++ cfg1_basic->elr = 0; ++ cfg1_basic->elr_done = 0; ++ cfg1_basic->mig_ctrl = 0; ++ cfg1_basic->sys_pgs = 0; ++ cfg1_basic->eid_upi_tab = 0; ++ cfg1_basic->ctp_tb_bypass = 0; ++ cfg1_basic->crystal_dma_en = 0; ++ cfg1_basic->dev_token_id = 0; ++ cfg1_basic->bus_access_en = 0; ++ cfg1_basic->dev_rs_access_en = 0; ++} ++ ++static void ub_reset_regions(UBDevice *dev) ++{ ++ UbCfg1Basic *cfg1_basic; ++ uint64_t offset; ++ UBIORegion *region; ++ int i; ++ ++ offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(dev->config + offset); ++ ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ region = &dev->io_regions[i]; ++ region->addr = UB_ER_UNMAPPED; ++ cfg1_basic->ers_ubba[i] = UB_ER_UNMAPPED; ++ } ++ qemu_log("ub device(%s %s) clear ubba\n", ++ dev->name, dev->qdev.id); ++} ++ ++static void ub_do_device_reset(UBDevice *dev) ++{ ++ /* ubba of idev is allocated by virtualization not by driver */ ++ if (dev->dev_type != UB_TYPE_IDEVICE) { ++ ub_reset_regions(dev); ++ ub_update_mappings(dev); ++ } ++ ub_dev_clear_cfg0(dev); ++ ub_dev_clear_cfg1(dev); ++ usi_reset(dev); ++ dev->rst_cnt++; ++} ++ + static void ubbus_reset(BusState *qbus) + { ++ UBBus *bus = DO_UPCAST(UBBus, qbus, qbus); ++ UBDevice *dev; ++ ++ QLIST_FOREACH(dev, &bus->devices, node) { ++ if (dev->dev_type != UB_TYPE_DEVICE && dev->dev_type != UB_TYPE_IDEVICE) { ++ continue; ++ } ++ qemu_log("ub device(%s %s) ub_do_device_reset\n", ++ dev->name, dev->qdev.id); ++ ub_do_device_reset(dev); ++ } + } + + UBBus *ub_register_root_bus(DeviceState *parent, const char *name, +@@ -566,9 +653,27 @@ static void ub_qdev_realize(DeviceState *qdev, Error **errp) + } + } + ++static void ub_unregister_io_regions(UBDevice *ub_dev) ++{ ++ UBIORegion *r; ++ int i; ++ ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ r = &ub_dev->io_regions[i]; ++ if (!r->size || r->addr == UB_ER_UNMAPPED) ++ continue; ++ memory_region_del_subregion(r->address_space, r->memory); ++ } ++} ++ + static void ub_qdev_unrealize(DeviceState *dev) + { ++ UBDevice *ub_dev = UB_DEVICE(dev); ++ ++ ub_unregister_io_regions(ub_dev); ++ do_ub_unregister_device(ub_dev); + } ++ + #define DECLARE_PORT_INFO(n) \ + DEFINE_PROP_UB_DEV_NEIGHBOR_INFO("port"#n, UBDevice, port), + static Property ub_props[] = { +-- +2.33.0 + diff --git a/ub-realize-vfio-ers-exit.patch b/ub-realize-vfio-ers-exit.patch new file mode 100644 index 0000000000000000000000000000000000000000..10e86bd93b018d82a196be3ce6ba3ef5cdee9963 --- /dev/null +++ b/ub-realize-vfio-ers-exit.patch @@ -0,0 +1,37 @@ +From e15484ea332dc3db1dbf53855073760356dd6876 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Fri, 21 Nov 2025 15:21:48 +0800 +Subject: [PATCH 7/7] ub: realize vfio ers exit + +do some clean when vfio ers exit + +Signed-off-by: caojinhuahw +--- + hw/vfio/ub.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/hw/vfio/ub.c b/hw/vfio/ub.c +index cfbf9eef3c..1f0d511bc0 100644 +--- a/hw/vfio/ub.c ++++ b/hw/vfio/ub.c +@@ -675,7 +675,16 @@ static void vfio_usi_early_setup(VFIOUBDevice *vdev, Error **errp) + + static void vfio_ers_exit(VFIOUBDevice *vdev) + { +- /* do nothing now */ ++ int i; ++ ++ for (i = 0; i < UB_NUM_REGIONS; i++) { ++ VFIOERS 
*er = &vdev->ers[i]; ++ ++ vfio_region_exit(&er->region); ++ if (er->region.size) { ++ memory_region_del_subregion(er->mr, er->region.mem); ++ } ++ } + } + + static void vfio_copy_config_space_slice(VFIOUBDevice *vdev, size_t offset, size_t read_size) +-- +2.33.0 + diff --git a/ub-support-enable-or-disable-ub-feature.patch b/ub-support-enable-or-disable-ub-feature.patch new file mode 100644 index 0000000000000000000000000000000000000000..0824c7dad5a31066f64026739898ce45f3dd9a0b --- /dev/null +++ b/ub-support-enable-or-disable-ub-feature.patch @@ -0,0 +1,184 @@ +From b9c4bedd334a53c04c399367d97a4d65dc267121 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Mon, 10 Nov 2025 15:16:31 +0800 +Subject: [PATCH] ub: support enable or disable ub feature + +support enable or disable ub feature by --enable-ub/--diable-ub + +Signed-off-by: caojinhuahw +--- + Kconfig.host | 3 +++ + hw/Kconfig | 1 + + hw/meson.build | 1 + + hw/ub/Kconfig | 3 +++ + hw/ub/hisi/meson.build | 0 + hw/ub/hisi/trace-events | 0 + hw/ub/meson.build | 5 +++++ + hw/ub/trace-events | 0 + meson.build | 10 ++++++++++ + meson_options.txt | 3 +++ + scripts/meson-buildoptions.sh | 5 +++++ + 11 files changed, 31 insertions(+) + create mode 100644 hw/ub/Kconfig + create mode 100644 hw/ub/hisi/meson.build + create mode 100644 hw/ub/hisi/trace-events + create mode 100644 hw/ub/meson.build + create mode 100644 hw/ub/trace-events + +diff --git a/Kconfig.host b/Kconfig.host +index faf58d9af5..e124f33231 100644 +--- a/Kconfig.host ++++ b/Kconfig.host +@@ -53,3 +53,6 @@ config VFIO_USER_SERVER_ALLOWED + + config HV_BALLOON_POSSIBLE + bool ++ ++config UB ++ bool +diff --git a/hw/Kconfig b/hw/Kconfig +index 9ca7b38c31..c91c0ba0cc 100644 +--- a/hw/Kconfig ++++ b/hw/Kconfig +@@ -44,6 +44,7 @@ source virtio/Kconfig + source vfio/Kconfig + source xen/Kconfig + source watchdog/Kconfig ++source ub/Kconfig + + # arch Kconfig + source arm/Kconfig +diff --git a/hw/meson.build b/hw/meson.build +index f01fac4617..51857f5613 100644 +--- a/hw/meson.build ++++ b/hw/meson.build +@@ -67,3 +67,4 @@ subdir('sparc') + subdir('sparc64') + subdir('tricore') + subdir('xtensa') ++subdir('ub') +diff --git a/hw/ub/Kconfig b/hw/ub/Kconfig +new file mode 100644 +index 0000000000..df08fc61af +--- /dev/null ++++ b/hw/ub/Kconfig +@@ -0,0 +1,3 @@ ++config HW_UB ++ bool ++ default y if UB +diff --git a/hw/ub/hisi/meson.build b/hw/ub/hisi/meson.build +new file mode 100644 +index 0000000000..e69de29bb2 +diff --git a/hw/ub/hisi/trace-events b/hw/ub/hisi/trace-events +new file mode 100644 +index 0000000000..e69de29bb2 +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +new file mode 100644 +index 0000000000..21c3f0ea6c +--- /dev/null ++++ b/hw/ub/meson.build +@@ -0,0 +1,5 @@ ++ub_ss = ss.source_set() ++ub_ss.add(files( ++)) ++system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) ++subdir('hisi') +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +new file mode 100644 +index 0000000000..e69de29bb2 +diff --git a/meson.build b/meson.build +index d379a71927..1dcfb794fa 100644 +--- a/meson.build ++++ b/meson.build +@@ -580,6 +580,11 @@ have_hugepage_pod = get_option('hugepage_pod') \ + + config_host_data.set('CONFIG_HUGEPAGE_POD', have_hugepage_pod) + ++# ub ++have_ub = get_option('ub') \ ++ .require(targetos == 'linux', error_message: 'UB is supported only on Linux') \ ++ .allowed() ++ + # vhost + have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(targetos != 'linux') \ +@@ -2287,6 +2292,7 @@ config_host_data.set('CONFIG_VHOST_NET_VDPA', have_vhost_net_vdpa) + 
config_host_data.set('CONFIG_VHOST_KERNEL', have_vhost_kernel) + config_host_data.set('CONFIG_VHOST_USER', have_vhost_user) + config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto) ++config_host_data.set('CONFIG_UB', have_ub) + config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa) + config_host_data.set('CONFIG_VMNET', vmnet.found()) + config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server) +@@ -2999,6 +3005,7 @@ host_kconfig = \ + (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ + (opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \ + (x11.found() ? ['CONFIG_X11=y'] : []) + \ ++ (have_ub ? ['CONFIG_UB=y'] : []) + \ + (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ + (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ + (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ +@@ -3454,6 +3461,8 @@ if have_system + 'system', + 'ui', + 'hw/remote', ++ 'hw/ub', ++ 'hw/ub/hisi' + ] + endif + if have_system or have_user +@@ -4226,6 +4235,7 @@ endif + summary_info += {'D-Bus display': dbus_display} + summary_info += {'QOM debugging': get_option('qom_cast_debug')} + summary_info += {'Relocatable install': get_option('relocatable')} ++summary_info += {'ub support': have_ub} + summary_info += {'vhost-kernel support': have_vhost_kernel} + summary_info += {'vhost-net support': have_vhost_net} + summary_info += {'vhost-user support': have_vhost_user} +diff --git a/meson_options.txt b/meson_options.txt +index f446612ff6..6152543e5d 100644 +--- a/meson_options.txt ++++ b/meson_options.txt +@@ -291,6 +291,9 @@ option('pipewire', type: 'feature', value: 'auto', + option('sndio', type: 'feature', value: 'auto', + description: 'sndio sound support') + ++option('ub', type: 'feature', value: 'auto', ++ description: 'unify bus support') ++ + option('vhost_kernel', type: 'feature', value: 'auto', + description: 'vhost kernel backend support') + option('vhost_net', type: 'feature', value: 'auto', +diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh +index 06f4f803c9..7f8f043039 100644 +--- a/scripts/meson-buildoptions.sh ++++ b/scripts/meson-buildoptions.sh +@@ -52,6 +52,8 @@ meson_options_help() { + printf "%s\n" ' Set available tracing backends [log] (choices:' + printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' + printf "%s\n" ' --enable-tsan enable thread sanitizer' ++ printf "%s\n" ' --enable-ub enable unify bus' ++ printf "%s\n" ' --disable-ub disable unify bus' + printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' + printf "%s\n" ' firmware]' + printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' +@@ -189,6 +191,7 @@ meson_options_help() { + printf "%s\n" ' tools build support utilities that come with QEMU' + printf "%s\n" ' tpm TPM support' + printf "%s\n" ' u2f U2F emulation support' ++ printf "%s\n" ' ub unify bus support' + printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' vde vde network backend support' + printf "%s\n" ' vdi vdi image format support' +@@ -513,6 +516,8 @@ _meson_option_parse() { + --disable-tsan) printf "%s" -Dtsan=false ;; + --enable-u2f) printf "%s" -Du2f=enabled ;; + --disable-u2f) printf "%s" -Du2f=disabled ;; ++ --enable-ub) printf "%s" -Dub=enabled ;; ++ --disable-ub) printf "%s" -Dub=disabled ;; + --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; + --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-vde) printf "%s" -Dvde=enabled ;; +-- +2.33.0 + diff --git a/ub-support-enum-msg-process.patch b/ub-support-enum-msg-process.patch new 
file mode 100644 index 0000000000000000000000000000000000000000..7cdf1b1ae9782966cee667f07e19c13597ff01d7 --- /dev/null +++ b/ub-support-enum-msg-process.patch @@ -0,0 +1,310 @@ +From 8dc21f4a732a6a489a553817ba51c777d030eaf4 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 20:48:44 +0800 +Subject: [PATCH 4/7] ub: support enum msg process + +realize enum msg for GuestOS enum all ub devices + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 4 + + hw/ub/ub_enum.c | 244 +++++++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_bus.h | 10 ++ + 3 files changed, 258 insertions(+) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index 4083217e7e..1fd621243b 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -1,3 +1,7 @@ ++# ub_enum.c ++enum_query_set_rsp_port_num(uint32_t num) "tlv total num ports is %u" ++handle_enum_query_request(uint32_t hops, uint32_t opcode, uint32_t idx_start, uint32_t num_ports, uint32_t max_num_ports, char *guid) "hops: %u, opcode: %u, port_idx_start:%u, rsp_num_ports:%u, max_num_ports:%u guid %s" ++ + # ub_cna_mgmt.c + handle_enum_cna_config_request(char *guid, uint32_t port_idx, uint32_t cmd, uint32_t opcode) "guid %s port_idx %u cmd %u opcode %u" + enum_set_cna_config_space_port(char *guid, uint32_t port_idx, uint32_t cna) "guid: %s, port_idx: %u, cna: %u" +diff --git a/hw/ub/ub_enum.c b/hw/ub/ub_enum.c +index 0419e2f295..e514f1732f 100644 +--- a/hw/ub/ub_enum.c ++++ b/hw/ub/ub_enum.c +@@ -22,10 +22,254 @@ + #include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" + #include "hw/ub/ub_enum.h" ++#include "hw/ub/ub_cna_mgmt.h" + #include "qemu/log.h" + #include "qapi/error.h" + #include "trace.h" + ++static void enum_get_port_info_from_config_space(UBDevice *dev, uint16_t port_idx, ++ EnumTlvPortInfo *port_info) ++{ ++ uint64_t offset = UB_PORT_SLICE_START + port_idx * UB_PORT_SZ; ++ uint64_t emulated_offset = ub_cfg_offset_to_emulated_offset(offset, true); ++ ConfigPortBasic *port_basic = (ConfigPortBasic *)(dev->config + emulated_offset); ++ ++ memset(port_info, 0, sizeof(EnumTlvPortInfo)); ++ port_info->bits0.len = sizeof(EnumTlvPortInfo); ++ port_info->bits0.type = TLV_PORT_INFO; ++ port_info->bits0.w = 1; ++ if (!memcmp(&port_basic->neighbor_port_info.neighbot_port_guid, ++ &port_info->remote_guid, sizeof(UbGuid))) { ++ port_info->bits0.s = UB_PORT_STATUS_DOWN; ++ } else { ++ /* dw0 */ ++ port_info->bits0.s = UB_PORT_STATUS_UP; ++ port_info->bits0.b = port_basic->port_info.enum_boundary; ++ port_info->bits0.t = port_basic->port_info.port_type; ++ /* dw1 */ ++ port_info->remote_port_idx = port_basic->neighbor_port_info.neighbor_port_idx; ++ port_info->local_port_idx = port_basic->port_info.port_idx; ++ /* dw3~dw6 */ ++ port_info->remote_guid = port_basic->neighbor_port_info.neighbot_port_guid; ++ } ++} ++ ++static void enum_query_set_rsp_port_info(EnumTopoQueryRspPdu *rsp_pdu, uint16_t num_ports, ++ uint16_t start_port_idx, UBDevice *dev) ++{ ++ EnumTlvPortInfo port_info; ++ uint32_t port_idx = start_port_idx; ++ for (uint32_t idx = 0; idx < num_ports; ++idx) { ++ uint8_t *dst_port_info_ptr = (uint8_t *)rsp_pdu->port_info + idx * sizeof(EnumTlvPortInfo); ++ enum_get_port_info_from_config_space(dev, port_idx, &port_info); ++ memcpy(dst_port_info_ptr, &port_info, sizeof(EnumTlvPortInfo)); ++ port_idx++; ++ } ++} ++ ++static uint32_t enum_query_get_slice0_resv_size(void) ++{ ++ uint32_t size = 0; ++ ++ size += sizeof(EnumTlvPortNum); ++ size += sizeof(EnumTlvSliceInfo); ++ size += sizeof(EnumTlvCapInfo); ++ ++ return size; 
++} ++ ++static uint16_t enum_query_get_max_num_ports(EnumPldScanHeader *scan_header) ++{ ++ return (ENUM_TOPO_QUERY_RSP_PDU_MAX_LEN - ENUM_PLD_SCAN_PDU_COMMON_SIZE - ++ enum_query_get_slice0_resv_size()) / sizeof(EnumTlvPortInfo); ++} ++ ++static size_t enum_query_get_rsp_size(EnumPldScanHeader *scan_header, uint16_t rsp_num_ports, uint8_t slice_id) ++{ ++ size_t size; ++ ++ size = sizeof(EnumTopoQueryRsp) + calc_forward_path_size(scan_header) + ++ rsp_num_ports * sizeof(EnumTlvPortInfo); ++ if (slice_id == 0) { ++ size += enum_query_get_slice0_resv_size(); ++ } ++ ++ return size; ++} ++ ++static uint32_t enum_query_get_rsp_pdu_len(uint16_t rsp_num_ports, uint8_t slice_id) ++{ ++ uint32_t size; ++ ++ size = ENUM_PLD_SCAN_PDU_COMMON_SIZE + rsp_num_ports * sizeof(EnumTlvPortInfo); ++ if (slice_id == 0) { ++ size += enum_query_get_slice0_resv_size(); ++ } ++ ++ return size / DWORD_SIZE; ++} ++ ++static void enum_query_set_rsp_port_num(EnumTopoQueryRspPdu *rsp_pdu, uint16_t rsp_num_ports, UBDevice *dev) ++{ ++ EnumTlvPortNum *tlv_port_num = NULL; ++ ++ tlv_port_num = (EnumTlvPortNum *)((uint8_t *)rsp_pdu + ENUM_PLD_SCAN_PDU_COMMON_SIZE + ++ rsp_num_ports * sizeof(EnumTlvPortInfo)); ++ tlv_port_num->type = TLV_PORT_NUM; ++ tlv_port_num->len = sizeof(EnumTlvPortNum); ++ tlv_port_num->total_num_ports = dev->port.port_num; ++ ++ trace_enum_query_set_rsp_port_num(tlv_port_num->total_num_ports); ++} ++ ++static void enum_query_set_rsp_slice_info(EnumTopoQueryRspPdu *rsp_pdu, uint16_t rsp_num_ports, uint8_t total_slice) ++{ ++ EnumTlvSliceInfo *tlv_slice_info = NULL; ++ ++ tlv_slice_info = (EnumTlvSliceInfo *)((uint8_t *)rsp_pdu + ENUM_PLD_SCAN_PDU_COMMON_SIZE + ++ rsp_num_ports * sizeof(EnumTlvPortInfo) + sizeof(EnumTlvPortNum)); ++ tlv_slice_info->type = TLV_SLICE_INFO; ++ tlv_slice_info->len = sizeof(EnumTlvSliceInfo); ++ tlv_slice_info->total_slice = total_slice; ++} ++ ++static void enum_query_set_rsp_cap_info(EnumTopoQueryRspPdu *rsp_pdu, uint16_t rsp_num_ports, UBDevice *dev) ++{ ++ EnumTlvCapInfo *tlv_cap_info = NULL; ++ UbCfg1Basic *cfg1_basic; ++ uint64_t emulated_offset; ++ ++ tlv_cap_info = (EnumTlvCapInfo *)((uint8_t *)rsp_pdu + ENUM_PLD_SCAN_PDU_COMMON_SIZE + ++ rsp_num_ports * sizeof(EnumTlvPortInfo) + sizeof(EnumTlvPortNum) + ++ sizeof(EnumTlvSliceInfo)); ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_BASIC_START, true); ++ cfg1_basic = (UbCfg1Basic *)(dev->config + emulated_offset); ++ tlv_cap_info->type = TLV_CAP_INFO; ++ tlv_cap_info->len = sizeof(EnumTlvCapInfo); ++ tlv_cap_info->class_code = cfg1_basic->class_code; ++ /* now cap add nothing */ ++} ++ ++// #pragma GCC push_options ++// #pragma GCC optimize ("O0") ++static void handle_enum_query_request(BusControllerState *s, HiMsgSqe *sqe, ++ void *buf) ++{ ++ /* req message */ ++ size_t header_sz; ++ EnumPktHeader *header = (EnumPktHeader *)buf; ++ struct ClanNetworkHeader *cnth = &header->cnth; ++ struct UbLinkHeader *ulh = &header->ulh; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ EnumPldScanHeader *scan_header; ++ EnumTopoQueryReq *scan_pdu; ++ EnumPldScanPduCommon *scan_pdu_com; ++ UBDevice *dev; ++ uint16_t port_idx_start, remain_num_ports, max_num_ports, rsp_num_ports; ++ uint8_t slice_id, total_slice; ++ /* rsp message */ ++ size_t rsp_size; ++ void *rsp_buf; ++ EnumPktHeader *rsp_pkt_hdr; ++ EnumPldScanHeader *rsp_scan_header; ++ EnumTopoQueryRspPdu *rsp_pdu; ++ HiMsgCqe cqe; ++ ++ if (ulh->cfg != UB_CLAN_LINK_CFG || cnth->nth_nlp != NTH_NLP_WITHOUT_TPH || ++ header->upi != UB_CP_UPI) { ++ 
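/* malformed enumeration header from the guest driver: log it and drop the request */ ++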
qemu_log("invalid enum pkt header, please check the driver inside guestos:" ++ " cfg %u nth_nlp %u upi 0x%x\n", ulh->cfg, cnth->nth_nlp, header->upi); ++ return; ++ } ++ ++ scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ header_sz = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true); ++ scan_pdu = (EnumTopoQueryReq *)((uint8_t *)buf + header_sz); ++ scan_pdu_com = (EnumPldScanPduCommon *)scan_pdu; ++ ub_device_get_str_from_guid(&scan_pdu_com->guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ dev = ub_find_device_by_guid(&scan_pdu_com->guid); ++ if (!dev) { ++ qemu_log("can not find device by guid %s\n", guid_str); ++ return; ++ } ++ ++ slice_id = scan_pdu->common.bits.slice_id; ++ max_num_ports = enum_query_get_max_num_ports(scan_header); ++ port_idx_start = slice_id * max_num_ports; ++ ++ remain_num_ports = dev->port.port_num - port_idx_start; ++ rsp_num_ports = remain_num_ports > max_num_ports ? max_num_ports : remain_num_ports; ++ trace_handle_enum_query_request(scan_header->bits.hops, scan_pdu_com->bits.opcode, ++ port_idx_start, rsp_num_ports, max_num_ports, guid_str); ++ ++ /* response includes forward path but not return path. */ ++ rsp_size = enum_query_get_rsp_size(scan_header, rsp_num_ports, slice_id); ++ rsp_buf = g_malloc(rsp_size); ++ memset(rsp_buf, 0, rsp_size); ++ rsp_pkt_hdr = (EnumPktHeader *)rsp_buf; ++ memcpy(rsp_pkt_hdr, header, sizeof(EnumPktHeader)); ++ rsp_scan_header = (EnumPldScanHeader *)(rsp_buf + ENUM_PKT_HEADER_SIZE); ++ memcpy(rsp_scan_header, scan_header, sizeof(EnumPldScanHeader)); ++ rsp_scan_header->bits.r = 0; ++ rsp_pdu = (EnumTopoQueryRspPdu *)(rsp_buf + ENUM_PKT_HEADER_SIZE + ++ ENUM_PLD_SCAN_HEADER_BASE_SIZE + calc_forward_path_size(scan_header)); ++ memcpy(&rsp_pdu->common, scan_pdu_com, sizeof(EnumPldScanPduCommon)); ++ rsp_pdu->common.bits.opcode = UB_ENUM_TOPO_QUERY_RSP; ++ rsp_pdu->common.bits.status = 0; ++ ++ /* set tlv port info */ ++ enum_query_set_rsp_port_info(rsp_pdu, rsp_num_ports, port_idx_start, dev); ++ ++ if (slice_id == 0) { ++ /* set tlv port num info */ ++ enum_query_set_rsp_port_num(rsp_pdu, rsp_num_ports, dev); ++ /* set tlv slice info */ ++ total_slice = (dev->port.port_num + max_num_ports - 1) / max_num_ports; ++ enum_query_set_rsp_slice_info(rsp_pdu, rsp_num_ports, total_slice); ++ /* set tlv cap info */ ++ enum_query_set_rsp_cap_info(rsp_pdu, rsp_num_ports, dev); ++ } ++ /* set pdu_len */ ++ rsp_pdu->common.pdu_len = enum_query_get_rsp_pdu_len(rsp_num_ports, slice_id); ++ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.opcode = sqe->opcode; ++ cqe.task_type = PROTOCOL_ENUM; ++ cqe.msn = sqe->msn; ++ cqe.p_len = rsp_size; ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = fill_rq(s, rsp_buf, rsp_size); ++ (void)fill_cq(s, &cqe); ++ g_free(rsp_buf); ++} ++// #pragma GCC pop_options ++ ++static void (*msgq_enum_handlers[])(BusControllerState *s, HiMsgSqe *sqe, ++ void *payload) = { ++ [ENUM_CMD_TOPO_QUERY] = handle_enum_query_request, ++ [ENUM_CMD_CNA_CFG] = handle_enum_cna_config_request, ++ [ENUM_CMD_CNA_QUERY] = handle_enum_cna_query_request, ++}; ++ + void handle_msg_enum(void *opaque, HiMsgSqe *sqe, void *payload) + { ++ BusControllerState *s = opaque; ++ ++ if (sqe->task_type != PROTOCOL_ENUM) { ++ qemu_log("invalid enum task type, please check the driver inside guestos:" ++ " task_type %u\n", sqe->task_type); ++ return; ++ } ++ ++ if (sqe->opcode >= ARRAY_SIZE(msgq_enum_handlers)) { ++ qemu_log("invalid msg code %u, array size %lu\n", ++ sqe->opcode, ARRAY_SIZE(msgq_enum_handlers)); ++ 
return; ++ } ++ ++ if (msgq_enum_handlers[sqe->opcode]) { ++ msgq_enum_handlers[sqe->opcode](s, sqe, payload); ++ } else { ++ qemu_log("cannot process PROTOCOL_ENUM opcode: %d\n", sqe->opcode); ++ } + } +\ No newline at end of file +diff --git a/include/hw/ub/ub_bus.h b/include/hw/ub/ub_bus.h +index 4fbc9407d5..58baea4efb 100644 +--- a/include/hw/ub/ub_bus.h ++++ b/include/hw/ub/ub_bus.h +@@ -20,6 +20,16 @@ + + #include "hw/ub/ub.h" + ++enum EnumTlvType { /* M: Mandatory , O: optional */ ++ TLV_SLICE_INFO = 0, /* M */ ++ TLV_PORT_NUM = 1, /* M */ ++ TLV_PORT_INFO = 2, /* M */ ++ TLV_RSV0 = 3, /* O */ ++ TLV_CAP_INFO = 4, /* M */ ++ TLV_RSV1 = 5, /* O */ ++ TLV_RSV2 = 6, /* O */ ++}; ++ + struct UBBusClass { + /* < private > */ + BusClass parent_class; +-- +2.33.0 + diff --git a/ub-support-mcmdq-process.patch b/ub-support-mcmdq-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..8c4f902582660263bc392198ec4c16e1676e6201 --- /dev/null +++ b/ub-support-mcmdq-process.patch @@ -0,0 +1,255 @@ +From c5cfb6f30ea9e13334fe17ed04742bbe60a70082 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 16:23:03 +0800 +Subject: [PATCH 4/6] ub: support mcmdq process + +1. support mcmdq process framework +2. realize mcmdq prefet_cfg process +3. realize mcmdq null cmd process +4. realize mcdmq tlbi cmd process +5. realize mcmdq plbi cmd process + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 4 + + hw/ub/ub_ummu.c | 200 +++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 204 insertions(+) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index acc25a7052..bcecd2ad67 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -2,6 +2,10 @@ + + # ub_ummu.c + ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)" ++mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s" ++mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)" ++mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)" ++mcmdq_cmd_null(uint32_t mcmdq_idx, uint64_t addr, void *hva, uint64_t size, uint64_t rb_size) "CMD_NULL: mcmdq_idx(%u) addr(0x%lx) hva(%p) size(0x%lx) rb_size(0x%lx)" + ummu_mcmdq_base_reg_writell(uint8_t idx, uint64_t base, uint8_t log2size) "idx(%u) base(0x%lx) log2size(0x%x)" + ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0x%x)" + ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index 75ac7659b5..87a6bfb075 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -35,6 +35,42 @@ + #include "qemu/error-report.h" + #include "trace.h" + ++static const char *const mcmdq_cmd_strings[MCMDQ_CMD_MAX] = { ++ [CMD_SYNC] = "CMD_SYNC", ++ [CMD_STALL_RESUME] = "CMD_STALL_RESUME", ++ [CMD_PREFET_CFG] = "CMD_PREFET_CFG", ++ [CMD_CFGI_TECT] = "CMD_CFGI_TECT", ++ [CMD_CFGI_TECT_RANGE] = "CMD_CFGI_TECT_RANGE", ++ [CMD_CFGI_TCT] = "CMD_CFGI_TCT", ++ [CMD_CFGI_TCT_ALL] = "CMD_CFGI_TCT_ALL", ++ [CMD_CFGI_VMS_PIDM] = "CMD_CFGI_VMS_PIDM", ++ [CMD_PLBI_OS_EID] = "CMD_PLBI_OS_EID", ++ [CMD_PLBI_OS_EIDTID] = "CMD_PLBI_OS_EIDTID", ++ [CMD_PLBI_OS_VA] = "CMD_PLBI_OS_VA", ++ [CMD_TLBI_OS_ALL] = "CMD_TLBI_OS_ALL", ++ [CMD_TLBI_OS_TID] = "CMD_TLBI_OS_TID", ++ [CMD_TLBI_OS_VA] = "CMD_TLBI_OS_VA", ++ [CMD_TLBI_OS_VAA] = "CMD_TLBI_OS_VAA", ++ [CMD_TLBI_HYP_ALL] = "CMD_TLBI_HYP_ALL", ++ [CMD_TLBI_HYP_TID] = 
"CMD_TLBI_HYP_TID", ++ [CMD_TLBI_HYP_VA] = "CMD_TLBI_HYP_VA", ++ [CMD_TLBI_HYP_VAA] = "CMD_TLBI_HYP_VAA", ++ [CMD_TLBI_S1S2_VMALL] = "CMD_TLBI_S1S2_VMALL", ++ [CMD_TLBI_S2_IPA] = "CMD_TLBI_S2_IPA", ++ [CMD_TLBI_NS_OS_ALL] = "CMD_TLBI_NS_OS_ALL", ++ [CMD_RESUME] = "CMD_RESUME", ++ [CMD_CREATE_KVTBL] = "CMD_CREATE_KVTBL", ++ [CMD_DELETE_KVTBL] = "CMD_DELETE_KVTBL", ++ [CMD_TLBI_OS_ALL_U] = "CMD_TLBI_OS_ALL_U", ++ [CMD_TLBI_OS_ASID_U] = "CMD_TLBI_OS_ASID_U", ++ [CMD_TLBI_OS_VA_U] = "CMD_TLBI_OS_VA_U", ++ [CMD_TLBI_OS_VAA_U] = "CMD_TLBI_OS_VAA_U", ++ [CMD_TLBI_HYP_ASID_U] = "CMD_TLBI_HYP_ASID_U", ++ [CMD_TLBI_HYP_VA_U] = "CMD_TLBI_HYP_VA_U", ++ [CMD_TLBI_S1S2_VMALL_U] = "CMD_TLBI_S1S2_VMALL_U", ++ [CMD_TLBI_S2_IPA_U] = "CMD_TLBI_S2_IPA_U", ++}; ++ + QLIST_HEAD(, UMMUState) ub_umms; + UMMUState *ummu_find_by_bus_num(uint8_t bus_num) + { +@@ -343,8 +379,172 @@ static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size) + return val; + } + ++static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); ++} ++ ++static void mcmdq_cmd_tlbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ trace_mcmdq_cmd_tlbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); ++} ++ ++static void mcmdq_check_pa_continuity_fill_result(UMMUMcmdQueue *mcmdq, bool continuity) ++{ ++ uint8_t result = 0; ++ dma_addr_t addr; ++ ++ result |= UMMU_RUN_IN_VM_FLAG; ++ if (continuity) { ++ result |= PA_CONTINUITY; ++ } else { ++ result |= PA_NOT_CONTINUITY; ++ } ++ ++#define CHECK_PA_CONTINUITY_RESULT_OFFSET 0x2 ++ addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue) + ++ MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size; ++ if (dma_memory_write(&address_space_memory, addr + CHECK_PA_CONTINUITY_RESULT_OFFSET, ++ &result, sizeof(result), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("dma failed to wirte result(0x%x) to addr 0x%lx\n", result, addr); ++ return; ++ } ++ ++ qemu_log("mcmdq check pa continuity update result(0x%x) success.\n", result); ++} ++ ++static void mcmdq_cmd_null(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ uint64_t size; ++ uint64_t addr; ++ void *hva = NULL; ++ ram_addr_t rb_offset; ++ RAMBlock *rb = NULL; ++ size_t rb_page_size = 0; ++ ++ if (CMD_NULL_SUBOP(cmd) != CMD_NULL_SUBOP_CHECK_PA_CONTINUITY) { ++ qemu_log("current cannot process CMD_NULL subop %u.\n", CMD_NULL_SUBOP(cmd)); ++ return; ++ } ++ ++ size = CMD_NULL_CHECK_PA_CONTI_SIZE(cmd); ++ addr = CMD_NULL_CHECK_PA_CONTI_ADDR(cmd); ++ hva = cpu_physical_memory_map(addr, &size, false); ++ rb = qemu_ram_block_from_host(hva, false, &rb_offset); ++ if (rb) { ++ rb_page_size = qemu_ram_pagesize(rb); ++ } else { ++ qemu_log("failed to get ram block from host(%p)\n", hva); ++ } ++ ++ trace_mcmdq_cmd_null(mcmdq_idx, addr, hva, size, rb_page_size); ++ ++#define PAGESZ_2M 0x200000 ++ if (rb_page_size < PAGESZ_2M) { ++ mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], false); ++ } else { ++ mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], true); ++ } ++} ++ ++static void mcmdq_cmd_prefet_cfg(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ /* do nothing */ ++} ++ ++static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) = { ++ [CMD_SYNC] = NULL, ++ [CMD_STALL_RESUME] = NULL, ++ [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg, ++ [CMD_CFGI_TECT] = NULL, ++ [CMD_CFGI_TECT_RANGE] = NULL, ++ [CMD_CFGI_TCT] = NULL, ++ [CMD_CFGI_TCT_ALL] = NULL, ++ 
[CMD_CFGI_VMS_PIDM] = NULL, ++ [CMD_PLBI_OS_EID] = mcmdq_cmd_plbi_x_process, ++ [CMD_PLBI_OS_EIDTID] = mcmdq_cmd_plbi_x_process, ++ [CMD_PLBI_OS_VA] = mcmdq_cmd_plbi_x_process, ++ [CMD_TLBI_OS_ALL] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_OS_TID] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_OS_VA] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_OS_VAA] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_HYP_ALL] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_HYP_TID] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_HYP_VA] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_HYP_VAA] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_S1S2_VMALL] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_S2_IPA] = mcmdq_cmd_tlbi_x_process, ++ [CMD_TLBI_NS_OS_ALL] = mcmdq_cmd_tlbi_x_process, ++ [CMD_RESUME] = NULL, ++ [CMD_CREATE_KVTBL] = NULL, ++ [CMD_DELETE_KVTBL] = NULL, ++ [CMD_NULL] = mcmdq_cmd_null, ++ [CMD_TLBI_OS_ALL_U] = NULL, ++ [CMD_TLBI_OS_ASID_U] = NULL, ++ [CMD_TLBI_OS_VA_U] = NULL, ++ [CMD_TLBI_OS_VAA_U] = NULL, ++ [CMD_TLBI_HYP_ASID_U] = NULL, ++ [CMD_TLBI_HYP_VA_U] = NULL, ++ [CMD_TLBI_S1S2_VMALL_U] = NULL, ++ [CMD_TLBI_S2_IPA_U] = NULL, ++}; ++ ++static MemTxResult ummu_cmdq_fetch_cmd(UMMUMcmdQueue *mcmdq, UMMUMcmdqCmd *cmd) ++{ ++ uint64_t addr, mcmdq_base_addr; ++ MemTxResult ret; ++ int i; ++ ++ mcmdq_base_addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue); ++ addr = mcmdq_base_addr + MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size; ++ ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(UMMUMcmdqCmd), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("addr 0x%lx failed to fectch mcmdq cmd\n", addr); ++ return ret; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(cmd->word); i++) { ++ le32_to_cpus(&cmd->word[i]); ++ } ++ ++ return ret; ++} ++ + static void mcmdq_process_task(UMMUState *u, uint8_t mcmdq_idx) + { ++ UMMUMcmdQueue *mcmdq = &u->mcmdqs[mcmdq_idx]; ++ UMMUMcmdqCmd cmd; ++ UmmuMcmdqCmdType cmd_type; ++ ++ if (!ummu_mcmdq_enabled(mcmdq)) { ++ ummu_mcmdq_disable_resp(mcmdq); ++ return; ++ } ++ ++ while (!ummu_mcmdq_empty(mcmdq)) { ++ if (ummu_cmdq_fetch_cmd(mcmdq, &cmd) != MEMTX_OK) { ++ /* eventq generate later */ ++ break; ++ } ++ ++ cmd_type = CMD_TYPE(&cmd); ++ if (cmd_type >= MCMDQ_CMD_MAX) { ++ /* eventq generate later */ ++ break; ++ } ++ ++ if (mcmdq_cmd_handlers[cmd_type]) { ++ trace_mcmdq_process_task(mcmdq_idx, mcmdq_cmd_strings[cmd_type]); ++ mcmdq_cmd_handlers[cmd_type](u, &cmd, mcmdq_idx); ++ } else { ++ qemu_log("current cannot process mcmdq cmd: %s.\n", mcmdq_cmd_strings[cmd_type]); ++ } ++ ++ ummu_mcmdq_cons_incr(mcmdq); ++ } ++ ++ ummu_mcmdq_enable_resp(mcmdq); + } + + static void ummu_mcmdq_reg_writel(UMMUState *u, hwaddr offset, uint64_t data) +-- +2.33.0 + diff --git a/ub-support-mcmdq-sync-handler.patch b/ub-support-mcmdq-sync-handler.patch new file mode 100644 index 0000000000000000000000000000000000000000..d85ac39ff9f8c35a435b6889173db311e06a9d8b --- /dev/null +++ b/ub-support-mcmdq-sync-handler.patch @@ -0,0 +1,70 @@ +From eadc470350cf8587364d487b4f5c0a7fc64cafb4 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 16:44:43 +0800 +Subject: [PATCH 6/6] ub: support mcmdq sync handler + +realize mcmdq sync handler + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 1 + + hw/ub/ub_ummu.c | 24 +++++++++++++++++++++++- + 2 files changed, 24 insertions(+), 1 deletion(-) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index ac55f5e406..e53af1bd75 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -3,6 +3,7 @@ + # ub_ummu.c + ummu_mcmdq_reg_writel(uint32_t idx, 
uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)" + mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s" ++mcmdq_cmd_sync_handler(uint32_t mcmdq_idx, uint64_t usi_addr, uint32_t usi_data) "CMD_SYNC: mcmdq_idx(%u) usi_addr(0x%lx) usi_data(0x%x)" + mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index d610c7d9e4..033fcb9a34 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -379,6 +379,28 @@ static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size) + return val; + } + ++static void mcmdq_cmd_sync_usi_irq(uint64_t addr, uint32_t data) ++{ ++ cpu_physical_memory_rw(addr, &data, sizeof(uint32_t), true); ++} ++ ++static void mcmdq_cmd_sync_sev_irq(void) ++{ ++ qemu_log("cannot support CMD_SYNC SEV event.\n"); ++} ++ ++static void mcmdq_cmd_sync_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ uint32_t cm = CMD_SYNC_CM(cmd); ++ ++ trace_mcmdq_cmd_sync_handler(mcmdq_idx, CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd)); ++ if (cm & CMD_SYNC_CM_USI) { ++ mcmdq_cmd_sync_usi_irq(CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd)); ++ } else if (cm & CMD_SYNC_CM_SEV) { ++ mcmdq_cmd_sync_sev_irq(); ++ } ++} ++ + static void mcmdq_cmd_create_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + UMMUKVTblEntry *entry = NULL; +@@ -502,7 +524,7 @@ static void mcmdq_cmd_prefet_cfg(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_ + } + + static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) = { +- [CMD_SYNC] = NULL, ++ [CMD_SYNC] = mcmdq_cmd_sync_handler, + [CMD_STALL_RESUME] = NULL, + [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg, + [CMD_CFGI_TECT] = NULL, +-- +2.33.0 + diff --git a/ub-support-sec-msg-process.patch b/ub-support-sec-msg-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..964798566c42de4bbac4694ba601488fab9b6525 --- /dev/null +++ b/ub-support-sec-msg-process.patch @@ -0,0 +1,142 @@ +From 8bc971858932e7fed518cedd5c07f0951e674d8c Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 20:56:29 +0800 +Subject: [PATCH 5/7] ub: support sec msg process + +realize ub sec msg process + +Signed-off-by: caojinhuahw +--- + hw/ub/ub_sec.c | 71 +++++++++++++++++++++++++++++++++++++++++- + include/hw/ub/ub_sec.h | 33 ++++++++++++++++++++ + 2 files changed, 103 insertions(+), 1 deletion(-) + +diff --git a/hw/ub/ub_sec.c b/hw/ub/ub_sec.c +index 4939e76925..e8b703824e 100644 +--- a/hw/ub/ub_sec.c ++++ b/hw/ub/ub_sec.c +@@ -19,6 +19,75 @@ + #include "hw/ub/ub_sec.h" + #include "qemu/log.h" + ++static void ub_sec_msg_fill_cq_rq(BusControllerState *s, HiMsgSqe *sqe, MsgPktHeader *header, ++ QueryTokenMsgPkt *rsp_pkt) ++{ ++ HiMsgCqe cqe; ++ uint32_t pi; ++ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.type = MSG_RSP; ++ cqe.msg_code = UB_MSG_CODE_SEC; ++ cqe.sub_msg_code = header->msgetah.sub_msg_code; ++ ++ rsp_pkt->header.nth.scna = header->nth.dcna; ++ rsp_pkt->header.nth.dcna = header->nth.scna; ++ rsp_pkt->header.deid = EID_GEN(header->seid_h, header->seid_l); ++ rsp_pkt->header.seid_h = EID_HIGH(header->deid); ++ rsp_pkt->header.seid_l = EID_LOW(header->deid); ++ ++ cqe.msn = sqe->msn; ++ cqe.p_len = 
MSG_SEC_QUERY_TOKEN_MSG_PKT_SIZE; ++ pi = fill_rq(s, rsp_pkt, sizeof(*rsp_pkt)); ++ if (pi == UINT32_MAX) { ++ qemu_log("fill rq failed!\n"); ++ return; ++ } ++ ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = pi; ++ (void)fill_cq(s, &cqe); ++} ++ ++static void ub_sec_token_get_req(BusControllerState *s, HiMsgSqe *sqe, MsgPktHeader *header) ++{ ++ QueryTokenMsgPkt rsp_pkt; ++ ++ memset(&rsp_pkt, 0, sizeof(rsp_pkt)); ++ memcpy(&rsp_pkt.header, header, sizeof(rsp_pkt.header)); ++ rsp_pkt.pld.rsp.token_id = 0; ++ rsp_pkt.pld.rsp.token_value = 0; ++ rsp_pkt.header.msgetah.rsp_status = UB_MSG_RSP_SUCCESS; ++ ub_sec_msg_fill_cq_rq(s, sqe, header, &rsp_pkt); ++} ++ ++static void (*msgq_sec_handlers[])(BusControllerState *s, HiMsgSqe *sqe, ++ MsgPktHeader *header) = { ++ [UB_DEV_ATTESTATION] = NULL, ++ [UB_DEV_AUTH] = NULL, ++ [UB_DEV_TOKEN_GET] = ub_sec_token_get_req, ++ [UB_DEV_TOKEN_SET] = NULL, ++ [UB_DEV_KEY_EXCHANGE] = NULL, ++ [UB_DEV_TOKEN_GET_RSP] = NULL, ++ [UB_DEV_TOKEN_SET_RSP] = NULL, ++}; ++ + void handle_msg_sec(void *opaque, HiMsgSqe *sqe, void *payload) + { +-} +\ No newline at end of file ++ BusControllerState *s = opaque; ++ MsgPktHeader *header = (MsgPktHeader *)payload; ++ MsgExtendedHeader *msgetah = &header->msgetah; ++ ++ if (msgetah->msg_code != UB_MSG_CODE_SEC || ++ msgetah->sub_msg_code >= ARRAY_SIZE(msgq_sec_handlers)) { ++ qemu_log("invalid msg code %u or sub msg code %u, array size %lu\n", ++ msgetah->msg_code, msgetah->sub_msg_code, ARRAY_SIZE(msgq_sec_handlers)); ++ return; ++ } ++ ++ if (msgq_sec_handlers[msgetah->sub_msg_code]) { ++ msgq_sec_handlers[msgetah->sub_msg_code](s, sqe, header); ++ } else { ++ qemu_log("dont support sec sub msg code %d.\n", msgetah->sub_msg_code); ++ } ++} +diff --git a/include/hw/ub/ub_sec.h b/include/hw/ub/ub_sec.h +index 000cf83812..860a7419db 100644 +--- a/include/hw/ub/ub_sec.h ++++ b/include/hw/ub/ub_sec.h +@@ -20,6 +20,39 @@ + #include "hw/ub/hisi/ubc.h" + #include "hw/qdev-core.h" + #include "hw/ub/ub_common.h" ++enum UbSecSubMsgCode { ++ UB_DEV_ATTESTATION = 0, ++ UB_DEV_AUTH = 1, ++ UB_DEV_TOKEN_GET = 2, ++ UB_DEV_TOKEN_SET = 3, ++ UB_DEV_KEY_EXCHANGE = 4, ++ UB_DEV_TOKEN_GET_RSP = 10, ++ UB_DEV_TOKEN_SET_RSP = 11, ++}; ++ ++typedef struct QueryTokenMsgPldRsp { ++ uint32_t token_check_support : 1; ++ uint32_t encode_decode_support : 1; ++ uint32_t reserved : 14; ++ uint32_t token_id : 16; ++ uint32_t token_value; ++} QueryTokenMsgPldRsp; ++#define QUERY_TOKEN_MSG_PLD_RSP_LEN sizeof(QueryTokenMsgPldRsp) ++ ++typedef struct QueryTokenMsgPld { ++ union { ++ /* request payload is NULL */ ++ struct QueryTokenMsgPldRsp rsp; ++ }; ++} QueryTokenMsgPld; ++#define QUERY_TOKEN_MSG_PLD_SIZE sizeof(QueryTokenMsgPld) ++ ++typedef struct QueryTokenMsgPkt { ++ struct MsgPktHeader header; ++ struct QueryTokenMsgPld pld; ++} QueryTokenMsgPkt; ++ ++#define MSG_SEC_QUERY_TOKEN_MSG_PKT_SIZE (MSG_PKT_HEADER_SIZE + QUERY_TOKEN_MSG_PLD_SIZE) + + void handle_msg_sec(void *opaque, HiMsgSqe *sqe, void *payload); + +-- +2.33.0 + diff --git a/ub-support-ub-acpi-report.patch b/ub-support-ub-acpi-report.patch new file mode 100644 index 0000000000000000000000000000000000000000..87cb4c698ab58abd0cbbeaf27cc05b6db12aa35f --- /dev/null +++ b/ub-support-ub-acpi-report.patch @@ -0,0 +1,1047 @@ +From 20821a031e158c05af7a8f335f431cb0ce58bbbd Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 14:37:11 +0800 +Subject: [PATCH 6/7] ub: support ub acpi report + +support report ub acpi table for guest + +Signed-off-by: caojinhuahw +--- + 
hw/arm/virt-acpi-build.c | 23 ++- + hw/arm/virt.c | 154 +++++++++++++- + hw/ub/ub_acpi.c | 411 ++++++++++++++++++++++++++++++++++++++ + include/hw/arm/virt.h | 17 ++ + include/hw/ub/hisi/ubc.h | 139 +++++++++++++ + include/hw/ub/ub_acpi.h | 8 + + include/hw/ub/ub_common.h | 30 +++ + include/hw/ub/ub_ummu.h | 29 +++ + 8 files changed, 806 insertions(+), 5 deletions(-) + create mode 100644 include/hw/ub/ub_ummu.h + +diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c +index b389ef7622..120fe0d0a1 100644 +--- a/hw/arm/virt-acpi-build.c ++++ b/hw/arm/virt-acpi-build.c +@@ -60,6 +60,9 @@ + #include "hw/acpi/viot.h" + #include "kvm_arm.h" + #include "hw/virtio/virtio-acpi.h" ++#ifdef CONFIG_UB ++#include "hw/ub/ub_acpi.h" ++#endif + + #define ARM_SPI_BASE 32 + +@@ -679,6 +682,11 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) + } else { + rc_mapping_count = 1; + } ++ ++#ifdef CONFIG_UB ++ nb_nodes += 3; /* UBC0, UMU0, PMU0 */ ++#endif ++ + /* Number of IORT Nodes */ + build_append_int_noprefix(table_data, nb_nodes, 4); + +@@ -788,6 +796,10 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) + build_iort_rmr_nodes(table_data, smmu_idmaps, smmu_offset, &id); + } + ++#ifdef CONFIG_UB ++ acpi_iort_add_ub(table_data); ++#endif ++ + acpi_table_end(linker, &table); + g_array_free(smmu_idmaps, true); + g_array_free(its_idmaps, true); +@@ -1318,6 +1330,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) + acpi_dsdt_add_tpm(scope, vms); + #endif + ++#ifdef CONFIG_UB ++ acpi_dsdt_add_ub(scope); ++#endif ++ + aml_append(dsdt, scope); + + /* copy AML table into ACPI tables blob */ +@@ -1365,7 +1381,10 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, vms); +- ++#ifdef CONFIG_UB ++ acpi_add_table(table_offsets, tables_blob); ++ build_ubrt(tables_blob, tables->linker, vms); ++#endif + /* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */ + acpi_add_table(table_offsets, tables_blob); + build_fadt_rev6(tables_blob, tables->linker, vms, dsdt); +@@ -1474,8 +1493,6 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) + " or PCI bridges."); + } + acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); +- +- + /* Cleanup memory that's no longer used. 
*/ + g_array_free(table_offsets, true); + } +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index b209140684..a27d3b5fc3 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -90,9 +90,14 @@ + #include "qemu/log.h" + #ifdef CONFIG_UB + #include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" + #include "hw/ub/ub_ubc.h" ++#include "hw/ub/hisi/ub_mem.h" ++#include "hw/ub/ub_acpi.h" + #include "hw/ub/hisi/ubc.h" + #include "hw/ub/hisi/ub_fm.h" ++#include "hw/ub/ub_ummu.h" ++#include "hw/ub/ub_common.h" + #endif // CONFIG_UB + + #define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ +@@ -213,6 +218,16 @@ static MemMapEntry extended_memmap[] = { + [VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB }, + /* Second PCIe window */ + [VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB }, ++#ifdef CONFIG_UB ++ /* ub mmio window */ ++ [VIRT_HIGH_UB_MMIO] = { 0x0, UBIOS_MMIOS_SIZE_PER_UBC * UBIOS_UBC_TABLE_CNT}, ++ /* ub idev fers window */ ++ [VIRT_UB_IDEV_ERS] = { 0x0, 512 * GiB}, ++ [VIRT_UBC_BASE_REG] = { 0x0, BASE_REG_SIZE}, /* now only support one UBC */ ++ [VIRT_UBIOS_INFO_TABLE] = { 0x0, UBIOS_TABLE_SIZE}, ++ [VIRT_UB_MEM_CC] = { 0x0, UB_MEM_SPACE_SIZE}, ++ [VIRT_UB_MEM_NC] = { 0x0, UB_MEM_SPACE_SIZE}, ++#endif // CONFIG_UB + }; + + static const int a15irqmap[] = { +@@ -376,6 +391,20 @@ static void create_fdt(VirtMachineState *vms) + } + } + ++#ifdef CONFIG_UB ++static void create_ubios_info_table_fdt(VirtMachineState *vms, MemoryRegion *machine_ram) ++{ ++ MachineState *ms = MACHINE(vms); ++ ++ qemu_fdt_setprop_u64(ms->fdt, "/chosen", "linux,ubios-information-table", ++ vms->memmap[VIRT_UBIOS_INFO_TABLE].base); ++ qemu_log("create fdt for ubios-information-table 0x%lx\n", ++ vms->memmap[VIRT_UBIOS_INFO_TABLE].base); ++ ++ ub_init_ubios_info_table(vms, ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB)); ++} ++#endif // CONFIG_UB ++ + static void fdt_add_timer_nodes(const VirtMachineState *vms) + { + /* On real hardware these interrupts are level-triggered. 
+@@ -1725,11 +1754,41 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) + static void create_ub(VirtMachineState *vms) + { + DeviceState *ubc; ++ MemoryRegion *mmio_reg; ++ MemoryRegion *mmio_alias; + + ubc = qdev_new(TYPE_BUS_CONTROLLER); + qdev_prop_set_uint32(ubc, "ub-bus-controller-msgq-reg-size", UBC_MSGQ_REG_SIZE); + qdev_prop_set_uint32(ubc, "ub-bus-controller-fm-msgq-reg-size", FM_MSGQ_REG_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ubc), &error_fatal); ++ ++ /* in ub_bus_controller_realize will call sysbus_init_mmio init memory_region in order, ++ * 0: msgq_reg_mem ++ * 1: fm_msgq_reg_mem ++ * 2: ub controller io_mmio ++ * sysbus_mmio_map get inited memory_region by index 0, msgq_reg_mem ++ */ ++ sysbus_mmio_map(SYS_BUS_DEVICE(ubc), 0, ++ vms->memmap[VIRT_UBC_BASE_REG].base + UBC_MSGQ_REG_OFFSET); ++ /* sysbus_mmio_map get inited memory_region by index 1, fm_msgq_reg_mem */ ++ sysbus_mmio_map(SYS_BUS_DEVICE(ubc), 1, ++ vms->memmap[VIRT_UBC_BASE_REG].base + FM_MSGQ_REG_OFFSET); ++ mmio_alias = g_new0(MemoryRegion, 1); ++ /* here get inited memory_region by index 3, ub controller io_mmio */ ++ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(ubc), 2); ++ memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-mmio", ++ mmio_reg, vms->memmap[VIRT_HIGH_UB_MMIO].base, ++ vms->memmap[VIRT_HIGH_UB_MMIO].size); ++ memory_region_add_subregion(get_system_memory(), ++ vms->memmap[VIRT_HIGH_UB_MMIO].base, ++ mmio_alias); ++ ++ mmio_alias = g_new0(MemoryRegion, 1); ++ memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-idev-fers-as", ++ mmio_reg, vms->memmap[VIRT_UB_IDEV_ERS].base, ++ vms->memmap[VIRT_UB_IDEV_ERS].size); ++ memory_region_add_subregion(get_system_memory(), ++ vms->memmap[VIRT_UB_IDEV_ERS].base, mmio_alias); + } + #endif // CONFIG_UB + static void create_pcie(VirtMachineState *vms) +@@ -2040,6 +2099,14 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms, + &vms->highmem_redists, + &vms->highmem_ecam, + &vms->highmem_mmio, ++#ifdef CONFIG_UB ++ &vms->highmem_ub_mmio, ++ &vms->highmem_idev_ers, ++ &vms->highmem_ubc_base_reg, ++ &vms->highmem_ubios_info_table, ++ &vms->highmem_ub_mem_cc, ++ &vms->highmem_ub_mem_nc, ++#endif // CONFIG_UB + }; + + assert(ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST == +@@ -2056,6 +2123,9 @@ static void virt_set_high_memmap(VirtMachineState *vms, + bool *region_enabled, fits; + int i; + ++#ifdef CONFIG_UB ++ ub_set_gpa_bits((uint8_t)pa_bits); ++#endif + for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) { + region_enabled = virt_get_high_memmap_enabled(vms, i); + region_base = ROUND_UP(base, extended_memmap[i].size); +@@ -2074,6 +2144,10 @@ static void virt_set_high_memmap(VirtMachineState *vms, + */ + fits = (region_base + region_size) <= BIT_ULL(pa_bits); + *region_enabled &= fits; ++#ifdef CONFIG_UB ++ qemu_log("%d base 0x%lx size 0x%lx enable %u highmem_compact %u\n", i, ++ region_base, region_size, *region_enabled, vms->highmem_compact); ++#endif + if (vms->highmem_compact && !*region_enabled) { + continue; + } +@@ -2152,8 +2226,8 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits) + /* Base address of the high IO region */ + memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB); + if (memtop > BIT_ULL(pa_bits)) { +- error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n", +- pa_bits, memtop - BIT_ULL(pa_bits)); ++ error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n", ++ pa_bits, memtop - 
BIT_ULL(pa_bits)); + exit(EXIT_FAILURE); + } + if (base < device_memory_base) { +@@ -2861,6 +2935,16 @@ static void machvirt_init(MachineState *machine) + machine->ram); + + virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem); ++#ifdef CONFIG_UB ++ qemu_log("memory_region_add_reservation 0x%lx size %ld round up %ld\n", ++ vms->memmap[VIRT_UBIOS_INFO_TABLE].base, UBIOS_TABLE_SIZE, ++ ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB)); ++ memory_region_add_reservation_with_ram(get_system_memory(), ++ OBJECT(machine->memdev), "ubios-information-table", ++ vms->memmap[VIRT_UBIOS_INFO_TABLE].base, ++ ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB)); ++ create_ubios_info_table_fdt(vms, machine->ram); ++#endif // CONFIG_UB + + create_gic(vms, sysmem); + +@@ -3995,6 +4079,50 @@ static int virt_kvm_type(MachineState *ms, const char *type_str) + return requested_pa_size | rme_vm_type | type; + } + ++#ifdef CONFIG_UB ++static bool virt_get_ummu(Object *obj, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ return vms->ummu; ++} ++ ++static void virt_set_ummu(Object *obj, bool value, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ vms->ummu = value; ++} ++ ++static bool virt_ub_get_cluster_mode(Object *obj, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ return vms->ub_cluster_mode; ++} ++ ++static void virt_ub_set_cluster_mode(Object *obj, bool value, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ vms->ub_cluster_mode = value; ++} ++ ++static bool virt_ub_get_fm_deployment_info(Object *obj, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ return vms->fm_deployment; ++} ++ ++static void virt_ub_set_fm_deployment_info(Object *obj, bool value, Error **errp) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ ++ vms->fm_deployment = value; ++} ++#endif // CONFIG_UB ++ + static void virt_machine_class_init(ObjectClass *oc, void *data) + { + MachineClass *mc = MACHINE_CLASS(oc); +@@ -4169,6 +4297,17 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) + object_class_property_set_description(oc, "x-target-impl-cpus", + "Describe target cpu impl in the format midr1:revidr1-midr2:revidr2" + "Maximum 4 midr:revidr pair is supported"); ++#ifdef CONFIG_UB ++ object_class_property_add_bool(oc, "ummu", virt_get_ummu, virt_set_ummu); ++ object_class_property_add_bool(oc, "ub-cluster-mode", virt_ub_get_cluster_mode, ++ virt_ub_set_cluster_mode); ++ object_class_property_set_description(oc, "ub-cluster-mode", ++ "Set on/off to enable/disable ub cluster mode"); ++ object_class_property_add_bool(oc, "fm-deployment", virt_ub_get_fm_deployment_info, ++ virt_ub_set_fm_deployment_info); ++ object_class_property_set_description(oc, "fm-deployment", ++ "Set on/off to support FM msg queue or not"); ++#endif // CONFIG_UB + } + + static char *virt_get_kvm_type(Object *obj, Error **errp G_GNUC_UNUSED) +@@ -4235,6 +4374,17 @@ static void virt_instance_init(Object *obj) + /* MTE is disabled by default. 
*/ + vms->mte = false; + ++#ifdef CONFIG_UB ++ vms->highmem_ub_mmio = true; ++ vms->highmem_idev_ers = true; ++ vms->highmem_ubc_base_reg = true; ++ vms->highmem_ubios_info_table = true; ++ vms->highmem_ub_mem_cc = true; ++ vms->highmem_ub_mem_nc = true; ++ vms->ub_cluster_mode = false; ++ vms->fm_deployment = false; ++#endif ++ + vms->irqmap = a15irqmap; + + virt_flash_create(vms); +diff --git a/hw/ub/ub_acpi.c b/hw/ub/ub_acpi.c +index 9b3af82203..f9e38a2da3 100644 +--- a/hw/ub/ub_acpi.c ++++ b/hw/ub/ub_acpi.c +@@ -31,7 +31,418 @@ + #include "qapi/error.h" + #include "qapi/util.h" + #include "qapi/qmp/qstring.h" ++#include "hw/ub/ub_ummu.h" + #include "hw/ub/hisi/ub_mem.h" + #include "hw/ub/hisi/ub_fm.h" ++#include "hw/ub/hisi/ubc.h" + #include "hw/acpi/aml-build.h" ++#include "hw/ub/ub_common.h" ++#define UBIOS_VERSION 1 ++#define DTS_SIG_UBCTL "bus controller" ++#define DTS_SIG_UMMU "ummu" ++#define DTS_SIG_RSV_MEM "rsv_mem" + ++static uint8_t gpa_bits; ++void ub_set_gpa_bits(uint8_t bits) ++{ ++ gpa_bits = bits; ++} ++ ++static void ub_init_table_header(DtsTableHeader *header, ++ const char *name, ++ uint32_t size, uint16_t version) ++{ ++ strncpy(header->name, name, sizeof(header->name) - 1); ++ header->total_size = size; ++ header->version = version; ++ header->remain_size = 0; ++ qemu_log("%s total_size %u\n", name, size); ++} ++ ++static void ub_init_vendor_info(UbcVendorInfo *vendor_info, VirtMachineState *vms) ++{ ++ uint16_t mar_id; ++ uint64_t base_reg = vms->memmap[VIRT_UBC_BASE_REG].base; ++ uint64_t addr_cc = vms->memmap[VIRT_UB_MEM_CC].base; ++ uint64_t addr_nc = vms->memmap[VIRT_UB_MEM_NC].base; ++ UbMemDecoderInfo *mem_info; ++ uint64_t local_reg_offset[] = { ++ BA0_OFFSET, ++ BA1_OFFSET, ++ BA2_OFFSET, ++ BA3_OFFSET, ++ BA4_OFFSET, ++ }; ++ uint64_t mar_space_size[] = { ++ UB_MEM_MAR0_SPACE_SIZE, ++ UB_MEM_MAR1_SPACE_SIZE, ++ UB_MEM_MAR2_SPACE_SIZE, ++ UB_MEM_MAR3_SPACE_SIZE, ++ UB_MEM_MAR4_SPACE_SIZE, ++ }; ++ ++ memset(vendor_info, 0, sizeof(UbcVendorInfo)); ++ vendor_info->ub_mem_ver = 0; ++ vendor_info->max_addr_bits = gpa_bits; ++ /* now only support one UBC */ ++ vendor_info->cmd_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base + CMDQ_BASE_ADDR; ++ vendor_info->event_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base + EVTQ_BASE_ADDR; ++ vendor_info->vendor_feature_sets = 0; ++ ++ for (mar_id = 0; mar_id < MAR_NUM_ONE_UDIE; mar_id++) { ++ mem_info = &vendor_info->mem_info[mar_id]; ++ mem_info->decode_addr = base_reg + local_reg_offset[mar_id] + MAR_OFFSET; ++ mem_info->cc_base_addr = mar_space_size[mar_id] ? ++ addr_cc >> MB_SIZE_OFFSET : 0; ++ mem_info->cc_base_size = mar_space_size[mar_id] >> MB_SIZE_OFFSET; ++ mem_info->nc_base_addr = mar_space_size[mar_id] ? 
++ addr_nc >> MB_SIZE_OFFSET : 0; ++ mem_info->nc_base_size = mar_space_size[mar_id] >> MB_SIZE_OFFSET; ++ addr_cc += mar_space_size[mar_id]; ++ addr_nc += mar_space_size[mar_id]; ++ qemu_log("MAR%u decode_addr 0x%lx, cc ba 0x%x size 0x%x," ++ " nc ba 0x%x size 0x%x\n", ++ mar_id, mem_info->decode_addr, ++ mem_info->cc_base_addr, mem_info->cc_base_size, ++ mem_info->nc_base_addr, mem_info->nc_base_size); ++ } ++} ++ ++static void ub_init_ubc_node(uint16_t ubc_count, UbcNode *ubc, VirtMachineState *vms) ++{ ++ uint16_t i; ++ uint64_t ub_mmio_addr = vms->memmap[VIRT_HIGH_UB_MMIO].base; ++ for (i = 0; i < ubc_count; i++) { ++ (ubc + i)->interrupt_id_start = UBC_INTERRUPT_ID_START + i * UBC_INTERRUPT_ID_CNT; ++ (ubc + i)->interrupt_id_end = (ubc + i)->interrupt_id_start + UBC_INTERRUPT_ID_CNT - 1; ++ (ubc + i)->gpa_base = ub_mmio_addr + i * UBIOS_MMIOS_SIZE_PER_UBC; ++ (ubc + i)->gpa_size = UBIOS_MMIOS_SIZE_PER_UBC; ++ (ubc + i)->memory_size_limit = gpa_bits; ++ (ubc + i)->dma_cca = 1; /* 1: DMA(Y) CCA(Y) */ ++ (ubc + i)->ummu_mapping = UBIOS_UMMU_TABLE_CNT ? 0 : 0xffff; ++ (ubc + i)->proximity_domain = 0; ++ (ubc + i)->msg_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base + ++ UBC_MSGQ_REG_OFFSET; ++ (ubc + i)->msg_queue_size = UBC_MSGQ_REG_SIZE; ++ (ubc + i)->msg_queue_depth = HI_MSGQ_DEPTH; ++ (ubc + i)->msg_queue_interrupt = UBC_QUEUE_INTERRUPT_DEFAULT; ++ /* ++ * Interrupt attributes ++ * BIT0: Triggering ++ * ACPI_LEVEL_SENSITIVE 0x00 ++ * ACPI_EDGE_SENSITIVE 0x01 ++ * BIT1: Polarity ++ * ACPI_ACTIVE_HIGH 0x00 ++ * ACPI_ACTIVE_LOW 0x01 ++ */ ++ (ubc + i)->msg_queue_interrupt_attr = 0x0; ++ memset(&(ubc + i)->ubc_info, 0, sizeof(UbGuid)); ++ ub_init_vendor_info((UbcVendorInfo *)&(ubc + i)->vendor_info, vms); ++ qemu_log("init ubc_table[%d]=0x%lx, interrupt_id=[0x%x-0x%x]\n", ++ i, (ubc + i)->gpa_base, (ubc + i)->interrupt_id_start, ++ (ubc + i)->interrupt_id_end); ++ } ++} ++ ++static void ub_init_ubios_ubc_table(DtsSubUbcTable *ubc_table, VirtMachineState *vms) ++{ ++ UbcNode *ubc = NULL; ++ ++ ubc_table->ubc_count = UBIOS_UBC_TABLE_CNT; ++ ub_init_table_header(&ubc_table->header, DTS_SIG_UBCTL, ++ UBIOS_UBC_TABLE_SIZE(ubc_table->ubc_count), ++ UBIOS_VERSION); ++ ubc_table->local_cna_start = LOCAL_CNA_START; ++ ubc_table->local_cna_end = LOCAL_CNA_END; ++ ubc_table->local_eid_start = LOCAL_EID_START; ++ ubc_table->local_eid_end = LOCAL_EID_END; ++ ubc_table->feature_set = 0; ++ /* ubc_table->cluster_mode ++ * System working mode ++ * 0: single-node system ++ * 1: cluster mode ++ */ ++ ubc_table->cluster_mode = vms->ub_cluster_mode; ++ qemu_log("init ub cluster mode %u\n", ubc_table->cluster_mode); ++ ubc = (UbcNode *)ubc_table->node; ++ ub_init_ubc_node(ubc_table->ubc_count, ubc, vms); ++} ++ ++static void ub_init_ummu_vendor_info(UbMemMmuInfo *vendor_info, VirtMachineState *vms) ++{ ++ vendor_info->valid_bits = UB_MEM_VALID_VALUE; ++ vendor_info->protection_table_bits = 0xa; ++ vendor_info->translation_table_bits = 0x11; ++ vendor_info->ext_reg_base = (vms->memmap[VIRT_UBC_BASE_REG].base | UMMU_OFFSET | UB_MEM_REG_BASE); ++ vendor_info->ext_reg_size = UMMU_EXT_REG_SIZE; ++ qemu_log("ummu vendor info reg_base=0x%lx\n", vendor_info->ext_reg_base); ++} ++ ++static void ub_init_ubios_ummu_table(DtsSubUmmuTable *ummu_table, VirtMachineState *vms) ++{ ++ uint16_t i; ++ UmmuNode *ummu = NULL; ++ UbMemMmuInfo *vendor_info = NULL; ++ ++ ummu_table->count = UBIOS_UMMU_TABLE_CNT; ++ ub_init_table_header(&ummu_table->header, DTS_SIG_UMMU, ++ UBIOS_UMMU_TABLE_SIZE(ummu_table->count), ++ 
UBIOS_VERSION); ++ ummu = (UmmuNode *)ummu_table->node; ++ for (i = 0; i < ummu_table->count; i++) { ++ (ummu + i)->base_addr = vms->memmap[VIRT_UBC_BASE_REG].base + UMMU_REG_OFFSET + ++ i * SINGLE_UMMU_REG_SIZE; ++ (ummu + i)->addr_size = UMMU_REG_SIZE; ++ (ummu + i)->interrupt_id = UMMU_INTERRUPT_ID; ++ (ummu + i)->proximity_domain = 0; ++ (ummu + i)->its_index = 0; ++ (ummu + i)->pmu_addr = (ummu + i)->base_addr + SINGLE_UMMU_REG_SIZE; ++ (ummu + i)->pmu_size = SINGLE_UMMU_PMU_REG_SIZE; ++ (ummu + i)->pmu_interrupt_id = UMMU_INTERRUPT_ID + 1; ++ (ummu + i)->min_tid = UMMU_RESERVED_TID_NUM + 1; ++ (ummu + i)->max_tid = 0xFFFFF; ++ (ummu + i)->vender_id = VENDER_ID_HUAWEI; ++ ++ vendor_info = (UbMemMmuInfo *)(ummu + i)->vender_info; ++ ub_init_ummu_vendor_info(vendor_info, vms); ++ qemu_log("init ummu_table[%d]=0x%lx,pmu_addr=0x%lx,pmu_size=0x%lx,pmu_interrupt_id=0x%x\n", ++ i, (ummu + i)->base_addr, (ummu + i)->pmu_addr, ++ (ummu + i)->pmu_size, (ummu + i)->pmu_interrupt_id); ++ } ++} ++ ++static void ub_init_ubios_rsv_mem_table(DtsRsvMemTable *rsv_mem_table, VirtMachineState *vms) ++{ ++ MemRange *mem_range; ++ rsv_mem_table->count = UBIOS_UMMU_TABLE_CNT; ++ ub_init_table_header(&rsv_mem_table->header, DTS_SIG_RSV_MEM, ++ UBIOS_RSV_MEM_TABLE_SIZE(rsv_mem_table->count), ++ UBIOS_VERSION); ++ mem_range = (MemRange *)rsv_mem_table->node; ++ mem_range->flags = 0x1; /* direct mapping */ ++ memset(mem_range->reserved, 0, sizeof(mem_range->reserved)); ++ mem_range->base = 0x8000000; /* MSI_IOVA_BASE */ ++ mem_range->size = 0x100000; /* MSI_IOVA_LENGTH */ ++} ++ ++void ub_init_ubios_info_table(VirtMachineState *vms, uint64_t total_size) ++{ ++ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base; ++ uint64_t ubc_tables_addr = ubios_info_tables + UBIOS_INFO_TABLE_SIZE; ++ uint64_t ummu_tables_addr; ++ uint64_t size = total_size; ++ DtsRootTable *ubios = (DtsRootTable *)cpu_physical_memory_map(ubios_info_tables, ++ &size, true); ++ DtsSubUbcTable *ubc_table = (DtsSubUbcTable *)(ubios + 1); ++ uint64_t ubc_table_size; ++ DtsSubUmmuTable *ummu_table; ++ uint64_t ummu_table_size; ++ uint64_t rsv_mem_tables_addr; ++ DtsRsvMemTable *rsv_mem_table; ++ ++ if (!ubios || size != total_size) { ++ if (ubios) { ++ cpu_physical_memory_unmap(ubios, size, true, size); ++ } ++ qemu_log("cpu_physical_memory_map failed, size %lu total %lu ptr %p\n", ++ size, total_size, ubios); ++ return; ++ } ++ qemu_log("ubios_info_tables=0x%lx, ubc_tables_addr=0x%lx," ++ "ubios table size=%lu, UBIOS_UBC_TABLE_CNT %u," ++ "UBIOS_UMMU_TABLE_CNT %u\n", ++ ubios_info_tables, ubc_tables_addr, total_size, ++ UBIOS_UBC_TABLE_CNT, UBIOS_UMMU_TABLE_CNT); ++ memset(ubios, 0, sizeof(DtsRootTable)); ++ ub_init_table_header(&ubios->header, "ubios root", ++ sizeof(DtsRootTable), UBIOS_VERSION); ++ /* init ubc table */ ++ ubios->tables[ubios->count] = ubc_tables_addr; ++ ub_init_ubios_ubc_table(ubc_table, vms); ++ qemu_log("ubc ubios->tables[%u] = 0x%lx ubc_table = 0x%lx \n", ++ ubios->count, ubc_tables_addr, (uint64_t)ubc_table); ++ ubios->count++; ++ ubc_table_size = UBIOS_UBC_TABLE_SIZE(ubc_table->ubc_count); ++ ++ /* init ummu table */ ++ ummu_tables_addr = ubc_tables_addr + ALIGN_UP(ubc_table_size, UB_ALIGNMENT); ++ ummu_table = (DtsSubUmmuTable *)((uint8_t *)(ubc_table) + ++ ALIGN_UP(ubc_table_size, UB_ALIGNMENT)); ++ ubios->tables[ubios->count] = ummu_tables_addr; ++ ub_init_ubios_ummu_table(ummu_table, vms); ++ qemu_log("ummu ubios->tables[%u] = 0x%lx ummu_table=0x%lx\n", ++ ubios->count, ummu_tables_addr, 
(uint64_t)ummu_table); ++ ubios->count++; ++ ummu_table_size = UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT); ++ ++ /* init rsv mem table */ ++ rsv_mem_tables_addr = ummu_tables_addr + ALIGN_UP(ummu_table_size, UB_ALIGNMENT); ++ rsv_mem_table = (DtsRsvMemTable *)((uint8_t *)(ummu_table) + ++ ALIGN_UP(ummu_table_size, UB_ALIGNMENT)); ++ ubios->tables[ubios->count] = rsv_mem_tables_addr; ++ ub_init_ubios_rsv_mem_table(rsv_mem_table, vms); ++ ubios->count++; ++ ++ cpu_physical_memory_unmap(ubios, size, true, size); ++} ++ ++void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms) ++{ ++ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base; ++ uint64_t total_size = ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB); ++ uint64_t size = total_size; ++ UBBus *bus = vms->ub_bus; ++ ++ if (!bus) { ++ qemu_log("there is no ub bus\n"); ++ return; ++ } ++ ++ BusControllerState *ubc = container_of_ubbus(bus); ++ UbGuid guid = ubc->ubc_dev->parent.guid; ++ DtsRootTable *ubios = (DtsRootTable *)cpu_physical_memory_map(ubios_info_tables, ++ &size, true); ++ DtsSubUbcTable *ubc_table = (DtsSubUbcTable *)(ubios + 1); ++ UbcNode *ubc_node = (UbcNode *)ubc_table->node; ++ ++ if (!ubios || size != total_size) { ++ if (ubios) { ++ cpu_physical_memory_unmap(ubios, size, true, size); ++ } ++ qemu_log("cpu_physical_memory_map failed, size %lu total %lu ptr %p\n", ++ size, total_size, ubios); ++ return; ++ } ++ /* The virtual machine currently supports only one ub controller. */ ++ ubc_node->ubc_info = guid; ++ ++ cpu_physical_memory_unmap(ubios, size, true, size); ++} ++ ++void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) ++{ ++ /* 3 subtables: ubc, ummu, UB Reserved Memory */ ++ uint8_t table_cnt = 3; ++ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base; ++ uint64_t ubc_tables_addr = ubios_info_tables + UBIOS_INFO_TABLE_SIZE; ++ uint64_t ubc_table_size = UBIOS_UBC_TABLE_SIZE(UBIOS_UBC_TABLE_CNT); ++ uint64_t ummu_tables_addr = ubc_tables_addr + ALIGN_UP(ubc_table_size, UB_ALIGNMENT); ++ uint64_t ummu_table_size = UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT); ++ uint64_t rsv_mem_tables_addr = ummu_tables_addr + ALIGN_UP(ummu_table_size, UB_ALIGNMENT); ++ AcpiTable table = { .sig = "UBRT", .rev = 0, .oem_id = vms->oem_id, ++ .oem_table_id = vms->oem_table_id }; ++ ++ acpi_table_begin(&table, table_data); ++ build_append_int_noprefix(table_data, table_cnt, 4); ++ ++ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_BUS_CONTROLLER, 1); ++ build_append_int_noprefix(table_data, 0, 7); ++ build_append_int_noprefix(table_data, ubc_tables_addr, 8); ++ ++ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_UMMU, 1); ++ build_append_int_noprefix(table_data, 0, 7); ++ build_append_int_noprefix(table_data, ummu_tables_addr, 8); ++ ++ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_RSV_MEM, 1); ++ build_append_int_noprefix(table_data, 0, 7); ++ build_append_int_noprefix(table_data, rsv_mem_tables_addr, 8); ++ ++ acpi_table_end(linker, &table); ++ qemu_log("init UBRT: ubc_tbl=0x%lx, ummu_tbl=0x%lx, rsv_mem_tbl=0x%lx\n", ++ ubc_tables_addr, ummu_tables_addr, rsv_mem_tables_addr); ++} ++ ++void acpi_dsdt_add_ub(Aml *scope) ++{ ++ Aml *dev_ubc = aml_device("UBC0"); ++ Aml *dev_ummu = aml_device("UMU0"); ++ Aml *dev_pmu = aml_device("PMU0"); ++ ++ aml_append(dev_ubc, aml_name_decl("_HID", aml_string("HISI0541"))); ++ aml_append(dev_ubc, aml_name_decl("_UID", aml_int(0))); ++ aml_append(scope, dev_ubc); ++ ++ aml_append(dev_ummu, aml_name_decl("_HID", 
aml_string("HISI0551"))); ++ aml_append(dev_ummu, aml_name_decl("_UID", aml_int(0))); ++ aml_append(scope, dev_ummu); ++ ++ aml_append(dev_pmu, aml_name_decl("_HID", aml_string("HISI0571"))); ++ aml_append(dev_pmu, aml_name_decl("_UID", aml_int(0))); ++ aml_append(scope, dev_pmu); ++} ++ ++void acpi_iort_add_ub(GArray *table_data) ++{ ++ char name_ubc[11] = "\\_SB_.UBC0"; ++ char name_ummu[11] = "\\_SB_.UMU0"; ++ char name_pmu[11] = "\\_SB_.PMU0"; ++ int name_ubc_len = sizeof(name_ubc); ++ int name_ummu_len = sizeof(name_ummu); ++ int name_pmu_len = sizeof(name_pmu); ++ ++ /* Table 16 UBC */ ++ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */ ++ build_append_int_noprefix(table_data, 0x40, 2); /* Length */ ++ build_append_int_noprefix(table_data, 0, 1); /* Revision */ ++ build_append_int_noprefix(table_data, 0, 4); /* Identifier */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ ++ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */ ++ /* Named component specific data */ ++ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */ ++ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */ ++ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */ ++ g_array_append_vals(table_data, name_ubc, name_ubc_len); /* Device object name */ ++ build_append_int_noprefix(table_data, 0, 4); /* Padding */ ++ build_append_int_noprefix(table_data, 0, 4); /* Input base */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */ ++ build_append_int_noprefix(table_data, UBC_INTERRUPT_ID_START, 4); /* Output base */ ++ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */ ++ build_append_int_noprefix(table_data, 1, 4); /* Flags */ ++ ++ /* Table 16 UMMU */ ++ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */ ++ build_append_int_noprefix(table_data, 0x40, 2); /* Length */ ++ build_append_int_noprefix(table_data, 0, 1); /* Revision */ ++ build_append_int_noprefix(table_data, 0, 4); /* Identifier */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ ++ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */ ++ /* Named component specific data */ ++ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */ ++ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */ ++ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */ ++ g_array_append_vals(table_data, name_ummu, name_ummu_len); /* Device object name */ ++ build_append_int_noprefix(table_data, 0, 4); /* Padding */ ++ build_append_int_noprefix(table_data, 0, 4); /* Input base */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */ ++ build_append_int_noprefix(table_data, UMMU_INTERRUPT_ID, 4); /* Output base */ ++ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */ ++ build_append_int_noprefix(table_data, 1, 4); /* 
Flags */ ++ ++ /* Table 16 PMU */ ++ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */ ++ build_append_int_noprefix(table_data, 0x40, 2); /* Length */ ++ build_append_int_noprefix(table_data, 0, 1); /* Revision */ ++ build_append_int_noprefix(table_data, 0, 4); /* Identifier */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ ++ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */ ++ /* Named component specific data */ ++ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */ ++ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */ ++ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */ ++ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */ ++ g_array_append_vals(table_data, name_pmu, name_pmu_len); /* Device object name */ ++ build_append_int_noprefix(table_data, 0, 4); /* Padding */ ++ build_append_int_noprefix(table_data, 0, 4); /* Input base */ ++ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */ ++ build_append_int_noprefix(table_data, UMMU_INTERRUPT_ID + 1, 4); /* Output base */ ++ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */ ++ build_append_int_noprefix(table_data, 1, 4); /* Flags */ ++} +diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h +index a621844eb3..7f0d3ed39d 100644 +--- a/include/hw/arm/virt.h ++++ b/include/hw/arm/virt.h +@@ -155,6 +155,14 @@ enum { + VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST, + VIRT_HIGH_PCIE_ECAM, + VIRT_HIGH_PCIE_MMIO, ++#ifdef CONFIG_UB ++ VIRT_HIGH_UB_MMIO, ++ VIRT_UB_IDEV_ERS, ++ VIRT_UBC_BASE_REG, ++ VIRT_UBIOS_INFO_TABLE, ++ VIRT_UB_MEM_CC, ++ VIRT_UB_MEM_NC, ++#endif // CONFIG_UB + }; + + typedef enum VirtIOMMUType { +@@ -221,6 +229,15 @@ struct VirtMachineState { + bool highmem_mmio; + bool highmem_redists; + #ifdef CONFIG_UB ++ bool highmem_ub_mmio; ++ bool highmem_idev_ers; ++ bool highmem_ubc_base_reg; ++ bool highmem_ubios_info_table; ++ bool highmem_ub_mem_cc; ++ bool highmem_ub_mem_nc; ++ bool ummu; ++ bool ub_cluster_mode; ++ bool fm_deployment; + UBBus *ub_bus; + #endif // CONFIG_UB + bool its; +diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h +index fdaeae7b3e..c34693accb 100644 +--- a/include/hw/ub/hisi/ubc.h ++++ b/include/hw/ub/hisi/ubc.h +@@ -45,4 +45,143 @@ + #define UBC_INTERRUPT_ID_CNT 0x1000 + #define VENDER_ID_HUAWEI 0xCC08 + ++/* ++ * Local Register layout ++ * ++ * +-----------------------------+ ++ * | rsv 15M | ++ * +-----------------------------+ ++ * | 16th 1M CCUM | ++ * +-----------------------------+ 0xf00_0000 ++ * | 15th 16M UMMU | ++ * +-----------------------------+ 0xe00_0000 ++ * | 14th 16M NL4 | ++ * +-----------------------------+ 0xd00_0000 ++ * | 13th 16M BA4 | ++ * +-----------------------------+ 0xc00_0000 ++ * | 12th 16M NL3 | ++ * +-----------------------------+ 0xb00_0000 ++ * | 11th 16M BA3 | ++ * +-----------------------------+ 0xa00_0000 ++ * | 10th 16M NL2 | ++ * +-----------------------------+ 0x900_0000 ++ * | 9th 16M BA2 | ++ * +-----------------------------+ 0x800_000 ++ * | 8th 16M NL1 | ++ * +-----------------------------+ 0x700_0000 ++ * | 7th 16M BA1 | ++ * +-----------------------------+ 0x600_0000 ++ * | 6th 16M NL0 | ++ * +-----------------------------+ 0x500_0000 ++ * | 5th 
16M TA | ++ * +-----------------------------+ 0x400_0000 ++ * | 4th 16M BA0 | ++ * +-----------------------------+ 0x300_0000 ++ * | 3th 16M TP | ++ * +-----------------------------+ 0x200_0000 ++ * | 2rd 16M MISC | ++ * +-----------------------------+ 0x100_0000 ++ * | 1st 16M | ++ * +-----------------------------+ 0x000_0000 ++ */ ++#define FST_OFFSET 0x0000000 ++#define MISC_OFFSET 0x1000000 ++#define TP_OFFSET 0x2000000 ++#define BA0_OFFSET 0x3000000 ++#define TA_OFFSET 0x4000000 ++#define NL0_OFFSET 0x5000000 ++#define BA1_OFFSET 0x6000000 ++#define NL1_OFFSET 0x7000000 ++#define BA2_OFFSET 0x8000000 ++#define NL2_OFFSET 0x9000000 ++#define BA3_OFFSET 0xa000000 ++#define NL3_OFFSET 0xb000000 ++#define BA4_OFFSET 0xc000000 ++#define NL4_OFFSET 0xd000000 ++#define UMMU_OFFSET 0xe000000 ++#define CCUM_OFFSET 0xf000000 ++#define LOCAL_REG_TYPE_SHIFT 24 ++#define LOCAL_REG_TYPE_MASK GENMASK_ULL(27, 24) ++#define LOCAL_REG_MEMBER_MASK GENMASK_ULL(23, 0) ++#define LOCAL_REG_ADDR_2_TYPE(addr) (((addr) & LOCAL_REG_TYPE_MASK) >> LOCAL_REG_TYPE_SHIFT) ++#define BA_REG_MEMBER_MASK GENMASK_ULL(19, 16) ++#define BA_REG_DATA_MASK GENMASK_ULL(15, 0) ++#define TOP_REG_OFFSET (0x00 * 64 * KiB) ++#define RXDMA_OFFSET (0x01 * 64 * KiB) ++#define TXDMA_OFFSET (0x02 * 64 * KiB) ++#define MASTER_OFFSET (0x03 * 64 * KiB) ++#define LSAD_OFFSET (0x04 * 64 * KiB) ++#define SMAP_OFFSET (0x0a * 64 * KiB) ++#define P2P_OFFSET (0x0c * 64 * KiB) ++#define MAR_OFFSET (0x0d * 64 * KiB) ++#define MAR_DECODE_OFFSET (0x0e * 64 * KiB) /* mem decoder table */ ++#define UB_RAS_OFFSET (0x12 * 64 * KiB) ++#define CCUA_OFFSET (0x14 * 64 * KiB) ++ ++/* references LinQuickCV100_UBOMMU_nManager */ ++#define TP_UBOMMU0_OFFSET 0x180000 ++#define TP_UBOMMU1_OFFSET 0x190000 ++#define TP_UBOMMU2_OFFSET 0x1c0000 ++#define TP_UBOMMU3_OFFSET 0x1d0000 ++#define TP_UBOMMU4_OFFSET 0x1e0000 ++#define TP_UBOMMU5_OFFSET 0x1f0000 ++#define TP_UBOMMU0_SELF_REG_OFFSET 0 ++#define TP_UBOMMU0_PMCG_OFFSET 0x3000 ++#define TP_UBOMMU0_PROTOCOL_OFFSET 0x4000 ++#define TP_UBOMMU0_CMDQ_OFFSET 0xc000 ++#define TP_UBOMMU0_EVENTQ_OFFSET 0xe000 ++#define SELF_ICG_CFG_OFFSET 0x0 ++#define UBOMMU_MEM_INIT_OFFSET 0x1804 ++#define DECODER_REG_BASE (TP_OFFSET + TP_UBOMMU0_OFFSET + \ ++ TP_UBOMMU0_SELF_REG_OFFSET) /* 0x2180000 */ ++#define CMDQ_BASE_ADDR (TP_OFFSET + TP_UBOMMU0_OFFSET + \ ++ TP_UBOMMU0_CMDQ_OFFSET) /* 0x218c000 */ ++#define EVTQ_BASE_ADDR (TP_OFFSET + TP_UBOMMU0_OFFSET + \ ++ TP_UBOMMU0_EVENTQ_OFFSET) /* 0x218e000 */ ++ ++#define DECODER_SELF_ICG_CFG_OFFSET (DECODER_REG_BASE + \ ++ SELF_ICG_CFG_OFFSET) /* 0x2180000 */ ++#define DECODER_SELF_MEM_INIT_OFFSET (DECODER_REG_BASE + \ ++ UBOMMU_MEM_INIT_OFFSET) /* 0x2181804 */ ++#define DECODER_MEM_INIT_DONE_VAL 0x3F ++#define DECODER_MEM_INIT_DONE_SHIFT 16 ++ ++#define SQ_ADDR_L 0x0 ++#define SQ_ADDR_H 0x4 ++#define SQ_PI 0x8 ++#define SQ_CI 0xc ++#define SQ_DEPTH 0x10 ++#define SQ_STATUS 0x14 ++#define RQ_ADDR_L 0x40 ++#define RQ_ADDR_H 0x44 ++#define RQ_PI 0x48 ++#define RQ_CI 0x4c ++#define RQ_DEPTH 0x50 ++#define RQ_ENTRY_SIZE 0x54 ++#define RQ_STATUS 0x58 ++#define CQ_ADDR_L 0x70 ++#define CQ_ADDR_H 0x74 ++#define CQ_PI 0x78 ++#define CQ_CI 0x7c ++#define CQ_DEPTH 0x80 ++#define CQ_STATUS 0x84 ++#define CQ_INT_MASK 0x88 ++#define CQ_INT_STATUS 0x8c ++#define CQ_INT_RO 0x90 ++#define CQ_INT_SET 0x94 ++#define MSGQ_RST 0xB0 ++ ++#define HI_MSG_SQE_PLD_SIZE 0x800 /* 2K */ ++#define HI_MSG_RQE_SIZE 0x800 /* 2K */ ++#define HI_MSGQ_DEPTH 16 ++#define HI_SQ_CFG_DEPTH HI_MSGQ_DEPTH ++#define 
HI_RQ_CFG_DEPTH HI_MSGQ_DEPTH ++#define HI_CQ_CFG_DEPTH HI_MSGQ_DEPTH ++#define HI_FM_MSG_ID_MIN 128 ++#define HI_FM_MSG_ID_MAX 191 ++#define MAP_COMMAND_MASK 0xff ++#define HI_FM_MSG_MAX (HI_SQ_CFG_DEPTH - 1) ++#define HI_MSGQ_MAX_DEPTH 1024 ++#define HI_MSGQ_MIN_DEPTH 4 ++ + #endif +diff --git a/include/hw/ub/ub_acpi.h b/include/hw/ub/ub_acpi.h +index d3af1c78bd..3579256444 100644 +--- a/include/hw/ub/ub_acpi.h ++++ b/include/hw/ub/ub_acpi.h +@@ -27,6 +27,8 @@ + #define DTS_TABLE_HEADER_RESERVE_LEN 3 + #define DTS_ROOT_TABLE_RESERVE_LEN 6 + #define DTS_TABLE_HEADER_NAME_LEN 16 ++/* ummu reserved tid num , don't modify */ ++#define UMMU_RESERVED_TID_NUM 64 + typedef struct DtsTableHeader { + char name[DTS_TABLE_HEADER_NAME_LEN]; + uint32_t total_size; +@@ -172,4 +174,10 @@ typedef struct AcpiUbrtTable { + UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT) + \ + UBIOS_RSV_MEM_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT)) + ++void ub_init_ubios_info_table(VirtMachineState *vms, uint64_t total_size); ++void ub_set_gpa_bits(uint8_t bits); ++void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms); ++void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms); ++void acpi_dsdt_add_ub(Aml *scope); ++void acpi_iort_add_ub(GArray *table_data); + #endif +\ No newline at end of file +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +index d52dc7e651..1336ea3ed3 100644 +--- a/include/hw/ub/ub_common.h ++++ b/include/hw/ub/ub_common.h +@@ -285,4 +285,34 @@ + #define LOOP_HELPER(macro, n) LOOP##n(macro) + #define LOOP(macro, n) LOOP_HELPER(macro, n) + ++#define for_each_set_bit(bit, addr, size) \ ++ for ((bit) = find_first_bit((addr), (size)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++ ++#define for_each_set_bit_from(bit, addr, size) \ ++ for ((bit) = find_next_bit((addr), (size), (bit)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++ ++#define EID_HIGH(eid) (((eid) >> 12) & 0xff) ++#define EID_LOW(eid) ((eid) & 0xfff) ++#define EID_GEN(eid_h, eid_l) ((eid_h) << 12 | (eid_l)) ++ ++#define UB_ALIGNMENT 64 ++ ++/* Round number down to multiple */ ++#define ALIGN_DOWN(n, m) ((n) / (m) * (m)) ++ ++/* Round number up to multiple */ ++#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m)) ++#define GENMASK(h, l) \ ++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++ ++#define BITS_PER_LONG_LONG 64 ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) - (1ULL << (l)) + 1) & \ ++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) ++#define DASH_SZ 3 ++ + #endif +diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h +new file mode 100644 +index 0000000000..f8b65a0bbe +--- /dev/null ++++ b/include/hw/ub/ub_ummu.h +@@ -0,0 +1,29 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef UB_UMMU_H ++#define UB_UMMU_H ++ ++#include "hw/sysbus.h" ++#include "qom/object.h" ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++ ++#define UMMU_INTERRUPT_ID 0x8989 // UMMU DEVICE ID need allocate later ++ ++#endif +-- +2.33.0 + diff --git a/ub-support-ub-cna-mgmt-msg-process.patch b/ub-support-ub-cna-mgmt-msg-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..a6d19c616b4a2585052b58d7638307a069a0d5d7 --- /dev/null +++ b/ub-support-ub-cna-mgmt-msg-process.patch @@ -0,0 +1,366 @@ +From 312626b7f0acfc50a66d5898ab84e1a7067d0da6 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 20:32:17 +0800 +Subject: [PATCH 3/7] ub: support ub cna mgmt msg process + +support process cna relative msg process + +Signed-off-by: caojinhuahw +--- + hw/ub/hisi/ubc_msgq.c | 3 +- + hw/ub/meson.build | 1 + + hw/ub/trace-events | 7 ++ + hw/ub/trace.h | 1 + + hw/ub/ub_cna_mgmt.c | 207 ++++++++++++++++++++++++++++++++++++ + include/hw/ub/ub_cna_mgmt.h | 71 +++++++++++++ + 6 files changed, 289 insertions(+), 1 deletion(-) + create mode 100644 hw/ub/trace.h + create mode 100644 hw/ub/ub_cna_mgmt.c + create mode 100644 include/hw/ub/ub_cna_mgmt.h + +diff --git a/hw/ub/hisi/ubc_msgq.c b/hw/ub/hisi/ubc_msgq.c +index 8258441ced..18403fbf4a 100644 +--- a/hw/ub/hisi/ubc_msgq.c ++++ b/hw/ub/hisi/ubc_msgq.c +@@ -28,6 +28,7 @@ + #include "hw/ub/hisi/ubc.h" + #include "trace.h" + #include "sysemu/dma.h" ++#include "hw/ub/ub_cna_mgmt.h" + + static void (*msgq_pool_handlers[])(BusControllerState *s, HiMsgSqe *sqe, + MsgPktHeader *header) = { +@@ -133,7 +134,7 @@ static void handle_task_type_enum(BusControllerState *s, HiMsgSqe *sqe) + g_free(scan_header); + return; + } +- header_size = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_TOPO_QUERY_REQ_SIZE; ++ header_size = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_NA_CFG_REQ_SIZE; + g_free(scan_header); + payload = g_malloc0(header_size); + if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index 4344a45741..d629174ef8 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -6,6 +6,7 @@ ub_ss.add(files( + 'ub_acpi.c', + 'ub_enum.c', + 'ub_common.c', ++ 'ub_cna_mgmt.c', + 'ub_sec.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index e69de29bb2..4083217e7e 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -0,0 +1,7 @@ ++# ub_cna_mgmt.c ++handle_enum_cna_config_request(char *guid, uint32_t port_idx, uint32_t cmd, uint32_t opcode) "guid %s port_idx %u cmd %u opcode %u" ++enum_set_cna_config_space_port(char *guid, uint32_t port_idx, uint32_t cna) "guid: %s, port_idx: %u, cna: %u" ++enum_set_cna_config_space_device(char *guid, uint32_t primary_cna) "guid: %s, primary_cna %u" ++ ++handle_enum_cna_query_request(char *guid, uint32_t port_idx, uint32_t cmd, uint32_t opcode) "guid %s port_idx %u cmd %u opcode %u" ++handle_enum_cna_query_request_rsp(char *guid, uint32_t cna) "guid %s return cna %u" +diff --git a/hw/ub/trace.h b/hw/ub/trace.h +new file mode 100644 +index 0000000000..b32402fc6f +--- /dev/null ++++ b/hw/ub/trace.h +@@ -0,0 +1 @@ ++#include "trace/trace-hw_ub.h" +diff --git a/hw/ub/ub_cna_mgmt.c b/hw/ub/ub_cna_mgmt.c +new file mode 100644 +index 0000000000..4339e20e20 +--- /dev/null ++++ b/hw/ub/ub_cna_mgmt.c +@@ -0,0 +1,207 @@ ++/* ++ * Copyright 
(c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "hw/ub/ub_cna_mgmt.h" ++#include "hw/ub/ub_enum.h" ++#include "hw/ub/ub_config.h" ++#include "qemu/log.h" ++#include "trace.h" ++ ++static void enum_set_cna_config_space(uint8_t opcode, EnumCnaCfgReq *cna_cfg_req) ++{ ++ UbGuid *guid = &cna_cfg_req->common.guid; ++ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ UBDevice *dev = ub_find_device_by_guid(guid); ++ uint64_t emulated_offset; ++ ConfigPortBasic *port_basic = NULL; ++ ConfigNetAddrInfo *net_addr_info = NULL; ++ ++ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1); ++ if (!dev) { ++ qemu_log("cannot find ub-device by guid: %s\n", guid_str); ++ return; ++ } ++ ++ if (opcode == UB_ENUM_CNA_MGMT_PORT) { ++ uint16_t port_idx = cna_cfg_req->port_idx; ++ uint64_t offset = UB_PORT_SLICE_START + port_idx * UB_PORT_SZ; ++ ++ emulated_offset = ub_cfg_offset_to_emulated_offset(offset, true); ++ port_basic = (ConfigPortBasic *)(dev->config + emulated_offset); ++ port_basic->port_cna = cna_cfg_req->cna; ++ trace_enum_set_cna_config_space_port(guid_str, port_idx, cna_cfg_req->cna); ++ } else if (opcode == UB_ENUM_CNA_MGMT_DEVICE) { ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_NA_INFO_START, true); ++ net_addr_info = (ConfigNetAddrInfo *)(dev->config + emulated_offset); ++ net_addr_info->primary_cna = cna_cfg_req->cna; ++ trace_enum_set_cna_config_space_device(guid_str, cna_cfg_req->cna); ++ } else { ++ qemu_log("not support opcode: %u\n", opcode); ++ } ++} ++ ++ ++void handle_enum_cna_config_request(BusControllerState *s, ++ HiMsgSqe *sqe, void *buf) ++{ ++ /* req message */ ++ size_t header_sz; ++ EnumPktHeader *header = (EnumPktHeader *)buf; ++ EnumPldScanHeader *scan_header; ++ EnumCnaCfgReq *cna_cfg_req; ++ /* rsp message */ ++ size_t rsp_size; ++ void *rsp_buf; ++ EnumPktHeader *rsp_pkt_hdr; ++ EnumPldScanHeader *rsp_scan_header; ++ EnumNaCfgRsp *rsp_pdu; ++ size_t forward_path_size; ++ HiMsgCqe cqe; ++ char guid[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ ++ scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ header_sz = ENUM_PKT_HEADER_SIZE + ++ calc_enum_pld_header_size(scan_header, true); ++ cna_cfg_req = (EnumCnaCfgReq *)((uint8_t *)buf + header_sz); ++ if (header->ulh.cfg != UB_CLAN_LINK_CFG || ++ header->cnth.nth_nlp != NTH_NLP_WITHOUT_TPH || ++ header->upi != UB_CP_UPI || ++ cna_cfg_req->common.bits.cmd != ENUM_CMD_CNA_CFG) { ++ qemu_log("invalid cna cfg reguest, please check driver inside guestos, " ++ "ulh.cfg %u cnth.nth_nlp %u upi 0x%x cmd %u\n", ++ header->ulh.cfg, header->cnth.nth_nlp, header->upi, ++ cna_cfg_req->common.bits.cmd); ++ return; ++ } ++ ++ ub_device_get_str_from_guid(&cna_cfg_req->common.guid, guid, UB_DEV_GUID_STRING_LENGTH + 1); ++ trace_handle_enum_cna_config_request(guid, 
cna_cfg_req->port_idx, ++ cna_cfg_req->common.bits.cmd, ++ cna_cfg_req->common.bits.opcode); ++ ++ enum_set_cna_config_space(cna_cfg_req->common.bits.opcode, cna_cfg_req); ++ ++ /* response includes forward path but not return path. */ ++ forward_path_size = calc_forward_path_size(scan_header); ++ rsp_size = sizeof(EnumPktHeader) + sizeof(EnumPldScanHeader) + ++ forward_path_size + sizeof(EnumNaCfgRsp); ++ rsp_buf = g_malloc0(rsp_size); ++ memset(rsp_buf, 0, rsp_size); ++ rsp_pkt_hdr = (EnumPktHeader *)rsp_buf; ++ memcpy(rsp_pkt_hdr, header, sizeof(EnumPktHeader)); ++ rsp_scan_header = (EnumPldScanHeader *)(rsp_buf + ENUM_PKT_HEADER_SIZE); ++ memcpy(rsp_scan_header, scan_header, sizeof(EnumPldScanHeader)); ++ rsp_scan_header->bits.r = 0; ++ rsp_pdu = (EnumNaCfgRsp *)(rsp_buf + ENUM_PKT_HEADER_SIZE + ++ ENUM_PLD_SCAN_HEADER_BASE_SIZE + forward_path_size); ++ memcpy(&rsp_pdu->common, &cna_cfg_req->common, sizeof(EnumPldScanPduCommon)); ++ rsp_pdu->common.bits.opcode = UB_ENUM_CNA_MGMT_RSV0; ++ rsp_pdu->common.bits.status = 0; ++ ++ /* set cqe val */ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.opcode = sqe->opcode; ++ cqe.task_type = PROTOCOL_ENUM; ++ cqe.msn = sqe->msn; ++ cqe.p_len = rsp_size; ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = fill_rq(s, rsp_buf, rsp_size); ++ (void)fill_cq(s, &cqe); ++ g_free(rsp_buf); ++} ++ ++void handle_enum_cna_query_request(BusControllerState *s, ++ HiMsgSqe *sqe, void *buf) ++{ ++ /* req message */ ++ size_t header_sz; ++ EnumPktHeader *header = (EnumPktHeader *)buf; ++ EnumPldScanHeader *scan_header; ++ EnumCnaQueryReq *cna_query_req; ++ /* rsp message */ ++ size_t rsp_size; ++ void *rsp_buf; ++ EnumPktHeader *rsp_pkt_hdr; ++ EnumPldScanHeader *rsp_scan_header; ++ EnumCnaQueryRsp *cna_req_rsp; ++ HiMsgCqe cqe; ++ char guid[UB_DEV_GUID_STRING_LENGTH + 1] = {0}; ++ UBDevice *dev; ++ ConfigNetAddrInfo *net_addr_info; ++ uint64_t emulated_offset; ++ size_t forward_path_size; ++ ++ scan_header = (EnumPldScanHeader *)((uint8_t *)buf + ENUM_PKT_HEADER_SIZE); ++ header_sz = ENUM_PKT_HEADER_SIZE + ++ calc_enum_pld_header_size(scan_header, true); ++ cna_query_req = (EnumCnaQueryReq *)((uint8_t *)buf + header_sz); ++ if (header->ulh.cfg != UB_CLAN_LINK_CFG || ++ header->cnth.nth_nlp != NTH_NLP_WITHOUT_TPH || ++ header->upi != UB_CP_UPI || ++ cna_query_req->common.bits.cmd != ENUM_CMD_CNA_QUERY) { ++ qemu_log("invalid cna cfg reguest, please check driver inside guestos, " ++ "ulh.cfg %u cnth.nth_nlp %u upi 0x%x cmd %u\n", ++ header->ulh.cfg, header->cnth.nth_nlp, header->upi, ++ cna_query_req->common.bits.cmd); ++ return; ++ } ++ ++ ub_device_get_str_from_guid(&cna_query_req->common.guid, guid, UB_DEV_GUID_STRING_LENGTH + 1); ++ dev = ub_find_device_by_guid(&cna_query_req->common.guid); ++ if (!dev) { ++ qemu_log("failed to find dev by guid %s\n", guid); ++ return; ++ } ++ ++ trace_handle_enum_cna_query_request(guid, cna_query_req->port_idx, ++ cna_query_req->common.bits.cmd, ++ cna_query_req->common.bits.opcode); ++ ++ forward_path_size = calc_forward_path_size(scan_header); ++ rsp_size = sizeof(EnumPktHeader) + sizeof(EnumPldScanHeader) + ++ forward_path_size + sizeof(EnumCnaQueryRsp); ++ rsp_buf = g_malloc0(rsp_size); ++ memset(rsp_buf, 0, rsp_size); ++ rsp_pkt_hdr = (EnumPktHeader *)rsp_buf; ++ memcpy(rsp_pkt_hdr, header, sizeof(EnumPktHeader)); ++ rsp_scan_header = (EnumPldScanHeader *)(rsp_buf + ENUM_PKT_HEADER_SIZE); ++ memcpy(rsp_scan_header, scan_header, sizeof(EnumPldScanHeader)); ++ rsp_scan_header->bits.r = 0; ++ cna_req_rsp = (EnumCnaQueryRsp *)(rsp_buf + 
ENUM_PKT_HEADER_SIZE + ++ ENUM_PLD_SCAN_HEADER_BASE_SIZE + forward_path_size); ++ memcpy(&cna_req_rsp->common, &cna_query_req->common, sizeof(cna_query_req->common)); ++ emulated_offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_BASIC_NA_INFO_START, true); ++ net_addr_info = (ConfigNetAddrInfo *)(dev->config + emulated_offset); ++ cna_req_rsp->cna = net_addr_info->primary_cna; ++ ++ trace_handle_enum_cna_query_request_rsp(guid, cna_req_rsp->cna); ++ ++ cna_req_rsp->common.bits.opcode = UB_ENUM_CNA_MGMT_RSV0; ++ cna_req_rsp->common.bits.status = 0; ++ ++ /* set cqe val */ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.opcode = sqe->opcode; ++ cqe.task_type = PROTOCOL_ENUM; ++ cqe.msn = sqe->msn; ++ cqe.p_len = rsp_size; ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = fill_rq(s, rsp_buf, rsp_size); ++ (void)fill_cq(s, &cqe); ++ g_free(rsp_buf); ++} +diff --git a/include/hw/ub/ub_cna_mgmt.h b/include/hw/ub/ub_cna_mgmt.h +new file mode 100644 +index 0000000000..f317216af3 +--- /dev/null ++++ b/include/hw/ub/ub_cna_mgmt.h +@@ -0,0 +1,71 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_CNA_MGMT_H ++#define UB_CNA_MGMT_H ++#include "hw/ub/hisi/ubc.h" ++#include "hw/qdev-core.h" ++#include "hw/ub/ub_common.h" ++#include "hw/ub/ub_enum.h" ++ ++typedef struct EnumCnaQueryReq { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++ /* DW6 */ ++ uint32_t port_idx : 16; ++ uint32_t rsv : 16; ++} EnumCnaQueryReq; ++ ++typedef struct EnumCnaQueryRsp { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++ /* DW6 */ ++ uint32_t cna : 24; ++ uint32_t rsvd : 8; ++} EnumCnaQueryRsp; ++ ++/* opcode for CNA config and query operation */ ++enum UbEnumCnaMgmtOpcode { ++ UB_ENUM_CNA_MGMT_RSV0 = 0, ++ UB_ENUM_CNA_MGMT_RSV1, ++ UB_ENUM_CNA_MGMT_DEVICE, ++ UB_ENUM_CNA_MGMT_PORT ++}; ++ ++typedef struct EnumCnaCfgReq { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++ /* DW6 */ ++ uint16_t rsvd; ++ uint16_t port_idx; ++ /* DW7 */ ++ uint32_t cna : 24; ++ uint8_t rsvd1; ++} EnumCnaCfgReq; ++#define ENUM_NA_CFG_REQ_SIZE 44 ++ ++typedef struct EnumNaCfgRsp { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++} EnumNaCfgRsp; ++ ++ ++void handle_enum_cna_config_request(BusControllerState *s, ++ HiMsgSqe *sqe, void *buf); ++void handle_enum_cna_query_request(BusControllerState *s, ++ HiMsgSqe *sqe, void *buf); ++#endif +\ No newline at end of file +-- +2.33.0 + diff --git a/ub-support-ubc-device.patch b/ub-support-ubc-device.patch new file mode 100644 index 0000000000000000000000000000000000000000..ff1eda8e21a8928f6a0f94b5ed38ddd9a79b994c --- /dev/null +++ b/ub-support-ubc-device.patch @@ -0,0 +1,198 @@ +From 9099defe390db6e8d43ec18b099b4c8d9317a264 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 15:10:32 +0800 +Subject: [PATCH 5/7] ub: support ubc device +MIME-Version: 1.0 +Content-Type: 
text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +1. add base ubc device framework realization +2. now the following cmd can be used to config a ubc device + -device ubc,guid=xxx,portnum=x,eid=x + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 14 ++++++++ + hw/ub/ub_ubc.c | 72 ++++++++++++++++++++++++++++++++++++++++++ + include/hw/arm/virt.h | 4 +++ + include/hw/ub/ub_ubc.h | 15 +++++++++ + 4 files changed, 105 insertions(+) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 8d94601dbb..974df5d0f7 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -249,3 +249,17 @@ bool ub_device_get_guid_from_str(UbGuid *guid, char *guid_str) + guid->seq_num = seq_num & 0xFFFFFFFFFFFFFFFF; + return true; + } ++ ++/* container_of cannot be used here because 'bus' is a pointer member. */ ++BusControllerState *container_of_ubbus(UBBus *bus) ++{ ++ BusControllerState *ubc = NULL; ++ ++ QLIST_FOREACH(ubc, &ub_bus_controllers, node) { ++ if (bus == ubc->bus) { ++ return ubc; ++ } ++ } ++ ++ return NULL; ++} +\ No newline at end of file +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index 3e634ab83a..e371f4f35a 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -165,6 +165,17 @@ const VMStateDescription vmstate_ub_bus_controller = { + } + }; + ++const VMStateDescription vmstate_ub_bus_controller_dev = { ++ .name = TYPE_BUS_CONTROLLER_DEV, ++ .needed = ub_bus_controller_needed, ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .fields = (VMStateField[]) { ++ /* support migration later */ ++ VMSTATE_END_OF_LIST() ++ } ++}; ++ + static void ub_bus_controller_class_init(ObjectClass *class, void *data) + { + DeviceClass *dc = DEVICE_CLASS(class); +@@ -194,8 +205,69 @@ static const TypeInfo ub_bus_controller_type_info = { + .class_init = ub_bus_controller_class_init, + }; + ++static bool ub_ubc_is_empty(UBBus *bus) ++{ ++ UBDevice *dev; ++ QLIST_FOREACH(dev, &bus->devices, node) { ++ if (dev->dev_type == UB_TYPE_IBUS_CONTROLLER) { ++ return false; ++ } ++ } ++ return true; ++} ++ ++static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp) ++{ ++ UBBus *bus = UB_BUS(qdev_get_parent_bus(DEVICE(dev))); ++ BusControllerState *ubc = container_of_ubbus(bus); ++ VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); ++ ++ vms->ub_bus = bus; ++ ++ if (!ub_ubc_is_empty(bus)) { ++ qemu_log("ubc already realized\n"); ++ error_setg(errp, "ubc already realized"); ++ return; ++ } ++ ++ ubc->ubc_dev = BUS_CONTROLLER_DEV(dev); ++ if (dev->guid.type != UB_GUID_TYPE_IBUS_CONTROLLER) { ++ qemu_log("%s device type set error, expect: %u, actual: %u\n", ++ dev->qdev.id, UB_GUID_TYPE_IBUS_CONTROLLER, dev->guid.type); ++ error_setg(errp, "%s device type set error, expect: %u, actual: %u\n", ++ dev->qdev.id, UB_GUID_TYPE_IBUS_CONTROLLER, dev->guid.type); ++ return; ++ } ++ ++ dev->dev_type = UB_TYPE_IBUS_CONTROLLER; ++} ++ ++static Property ub_bus_controller_dev_properties[] = { ++ DEFINE_PROP_UB_DEV_GUID("bus_instance_guid", BusControllerDev, bus_instance_guid), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++static void ub_bus_controller_dev_class_init(ObjectClass *class, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(class); ++ UBDeviceClass *uc = UB_DEVICE_CLASS(class); ++ ++ device_class_set_props(dc, ub_bus_controller_dev_properties); ++ uc->realize = ub_bus_controller_dev_realize; ++ dc->vmsd = &vmstate_ub_bus_controller_dev; ++} ++ ++static const TypeInfo ub_bus_controller_dev_type_info = { ++ .name = TYPE_BUS_CONTROLLER_DEV, ++ .parent = TYPE_UB_DEVICE, ++ .instance_size = sizeof(BusControllerDev), ++ .class_size = 
sizeof(BusControllerDevClass), ++ .class_init = ub_bus_controller_dev_class_init, ++}; ++ + static void ub_bus_controller_register_types(void) + { + type_register_static(&ub_bus_controller_type_info); ++ type_register_static(&ub_bus_controller_dev_type_info); + } + type_init(ub_bus_controller_register_types) +diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h +index fee7c27e0c..a621844eb3 100644 +--- a/include/hw/arm/virt.h ++++ b/include/hw/arm/virt.h +@@ -39,6 +39,7 @@ + #include "sysemu/kvm.h" + #include "hw/intc/arm_gicv3_common.h" + #include "qom/object.h" ++#include "hw/ub/ub_bus.h" + + #define NUM_GICV2M_SPIS 64 + #define NUM_VIRTIO_TRANSPORTS 32 +@@ -219,6 +220,9 @@ struct VirtMachineState { + bool highmem_ecam; + bool highmem_mmio; + bool highmem_redists; ++#ifdef CONFIG_UB ++ UBBus *ub_bus; ++#endif // CONFIG_UB + bool its; + bool tcg_its; + bool virt; +diff --git a/include/hw/ub/ub_ubc.h b/include/hw/ub/ub_ubc.h +index 5d9098a4c3..af0f4b1a7f 100644 +--- a/include/hw/ub/ub_ubc.h ++++ b/include/hw/ub/ub_ubc.h +@@ -23,6 +23,19 @@ + #include "hw/ub/hisi/ubc.h" + #include "hw/ub/ub_bus.h" + ++#define TYPE_BUS_CONTROLLER_DEV "ubc" ++OBJECT_DECLARE_TYPE(BusControllerDev, BusControllerDevClass, BUS_CONTROLLER_DEV) ++ ++typedef struct BusControllerDev { ++ UBDevice parent; ++ UbGuid bus_instance_guid; ++ int bus_instance_lock_fd; ++} BusControllerDev; ++ ++struct BusControllerDevClass { ++ UBDeviceClass parent_class; ++}; ++ + #define TYPE_BUS_CONTROLLER "ub-bus-controller" + OBJECT_DECLARE_TYPE(BusControllerState, BusControllerClass, BUS_CONTROLLER) + +@@ -39,6 +52,7 @@ struct BusControllerState { + MemoryRegion io_mmio; /* ub mmio hpa memory region */ + uint32_t mmio_size; + bool mig_enabled; ++ BusControllerDev *ubc_dev; + UBBus *bus; + QLIST_ENTRY(BusControllerState) node; + }; +@@ -48,4 +62,5 @@ struct BusControllerClass { + }; + + void ub_save_ubc_list(BusControllerState *s); ++BusControllerState *container_of_ubbus(UBBus *bus); + #endif +-- +2.33.0 + diff --git a/ub-support-ubc-msg-process.patch b/ub-support-ubc-msg-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..3d7ff5030988a4722a4905bfe4dea3f9a1dd5520 --- /dev/null +++ b/ub-support-ubc-msg-process.patch @@ -0,0 +1,994 @@ +From aca23c74e2bb55071c1b8ec6b15a8fdd796f8c59 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Tue, 11 Nov 2025 19:28:53 +0800 +Subject: [PATCH 1/7] ub: support ubc msg process + +add based ubc msg process framework + +Signed-off-by: caojinhuahw +--- + hw/ub/hisi/meson.build | 1 + + hw/ub/hisi/trace-events | 7 + + hw/ub/hisi/trace.h | 1 + + hw/ub/hisi/ubc_msgq.c | 361 ++++++++++++++++++++++++++++++++++++++ + hw/ub/meson.build | 3 + + hw/ub/ub_common.c | 89 ++++++++++ + hw/ub/ub_config.c | 4 + + hw/ub/ub_enum.c | 31 ++++ + hw/ub/ub_sec.c | 24 +++ + hw/ub/ub_ubc.c | 21 +++ + include/hw/ub/hisi/ubc.h | 5 + + include/hw/ub/ub_common.h | 3 + + include/hw/ub/ub_enum.h | 217 +++++++++++++++++++++++ + include/hw/ub/ub_pool.h | 29 +++ + include/hw/ub/ub_sec.h | 26 +++ + include/hw/ub/ub_ubc.h | 1 + + 16 files changed, 823 insertions(+) + create mode 100644 hw/ub/hisi/trace.h + create mode 100644 hw/ub/hisi/ubc_msgq.c + create mode 100644 hw/ub/ub_common.c + create mode 100644 hw/ub/ub_enum.c + create mode 100644 hw/ub/ub_sec.c + create mode 100644 include/hw/ub/ub_enum.h + create mode 100644 include/hw/ub/ub_pool.h + create mode 100644 include/hw/ub/ub_sec.h + +diff --git a/hw/ub/hisi/meson.build b/hw/ub/hisi/meson.build +index df07aae9e1..36d9db5a7e 100644 +--- 
a/hw/ub/hisi/meson.build ++++ b/hw/ub/hisi/meson.build +@@ -1,5 +1,6 @@ + ub_ss = ss.source_set() + ub_ss.add(files( ++ 'ubc_msgq.c', + 'ub_fm.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) +\ No newline at end of file +diff --git a/hw/ub/hisi/trace-events b/hw/ub/hisi/trace-events +index e69de29bb2..afe421dd93 100644 +--- a/hw/ub/hisi/trace-events ++++ b/hw/ub/hisi/trace-events +@@ -0,0 +1,7 @@ ++# See docs/devel/tracing.rst for syntax documentation. ++ ++# ubc_msgq.c ++handle_eu_table_cfg_cmd(uint32_t msg_code, uint32_t entry_num, uint32_t tbl_cfg_mode, uint32_t tbl_cfg_status, uint32_t entry_start_id, uint32_t eid, uint32_t upi) "eu_msg_code(%u), cfg_entry_num(%u), tbl_cfg_mode(%u), tbl_cfg_status(%u), entry_start_id(%u), eid(%u), upi(%u)" ++msgq_sq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "sq_base_addr_gpa 0x%lx sq_base_addr_hva 0x%lx depth %u" ++msgq_cq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "cq_base_addr_gpa 0x%lx cq_base_addr_hva 0x%lx depth %u" ++msgq_rq_init(uint64_t gpa, uint64_t hva, uint32_t depth) "rq_base_addr_gpa 0x%lx rq_base_addr_hva 0x%lx depth %u" +diff --git a/hw/ub/hisi/trace.h b/hw/ub/hisi/trace.h +new file mode 100644 +index 0000000000..08e5ad89bb +--- /dev/null ++++ b/hw/ub/hisi/trace.h +@@ -0,0 +1 @@ ++#include "trace/trace-hw_ub_hisi.h" +\ No newline at end of file +diff --git a/hw/ub/hisi/ubc_msgq.c b/hw/ub/hisi/ubc_msgq.c +new file mode 100644 +index 0000000000..8258441ced +--- /dev/null ++++ b/hw/ub/hisi/ubc_msgq.c +@@ -0,0 +1,361 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++#include "qemu/osdep.h" ++#include "qemu/module.h" ++#include "qemu/log.h" ++#include "hw/qdev-properties.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_config.h" ++#include "hw/ub/ub_pool.h" ++#include "hw/ub/ub_sec.h" ++#include "hw/ub/ub_enum.h" ++#include "hw/ub/hisi/ubc.h" ++#include "trace.h" ++#include "sysemu/dma.h" ++ ++static void (*msgq_pool_handlers[])(BusControllerState *s, HiMsgSqe *sqe, ++ MsgPktHeader *header) = { ++ [UB_DEV_REG] = NULL, /* only send from CFM */ ++ [UB_DEV_RLS] = NULL, /* only send from CFM */ ++ [UB_BI_CREATE] = NULL, ++ [UB_BI_DESTROY] = NULL, ++ [UB_CFG_CPL_NOTIFY] = NULL, /* only send from CFM */ ++}; ++ ++static void handle_msg_pool(void *opaque, HiMsgSqe *sqe, void *payload) ++{ ++ BusControllerState *s = opaque; ++ MsgPktHeader *header = (MsgPktHeader *)payload; ++ MsgExtendedHeader *msgetah = &header->msgetah; ++ ++ if (msgetah->msg_code != UB_MSG_CODE_POOL || ++ msgetah->sub_msg_code >= ARRAY_SIZE(msgq_pool_handlers)) { ++ qemu_log("invalid msg code %u or sub msg code %u, array size %lu\n", ++ msgetah->msg_code, msgetah->sub_msg_code, ARRAY_SIZE(msgq_pool_handlers)); ++ return; ++ } ++ ++ if (msgq_pool_handlers[msgetah->sub_msg_code]) { ++ msgq_pool_handlers[msgetah->sub_msg_code](s, sqe, header); ++ } else { ++ qemu_log("dont support sub msg code %d.\n", msgetah->sub_msg_code); ++ } ++} ++ ++static void (*msgq_handlers[])(void *opaque, HiMsgSqe *sqe, void *payload) = { ++ [UB_MSG_CODE_RAS] = NULL, ++ [UB_MSG_CODE_LINK] = NULL, ++ [UB_MSG_CODE_CFG] = handle_msg_cfg, ++ [UB_MSG_CODE_VDM] = NULL, ++ [UB_MSG_CODE_EXCH] = NULL, ++ [UB_MSG_CODE_SEC] = handle_msg_sec, ++ [UB_MSG_CODE_POOL] = handle_msg_pool, ++}; ++ ++static void handle_task_type_msg(BusControllerState *s, HiMsgSqe *sqe) ++{ ++ MsgPktHeader *payload = NULL; ++ uint8_t msg_code = sqe->msg_code; ++ uint32_t p_addr = sqe->p_addr; ++ uint32_t plen; ++ ++ if (msg_code >= (ARRAY_SIZE(msgq_handlers))) { ++ qemu_log("invalid msg code %u, array size %lu\n", ++ msg_code, ARRAY_SIZE(msgq_handlers)); ++ return; ++ } ++ ++ if (p_addr + HI_MSG_SQE_PLD_SIZE > s->msgq.sq_sz) { ++ qemu_log("invalid p_addr %u, total size %ld\n", ++ p_addr, s->msgq.sq_sz); ++ return; ++ } ++ ++ payload = g_malloc0(sizeof(MsgPktHeader)); ++ if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, ++ payload, sizeof(MsgPktHeader), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ plen = payload->msgetah.plen; ++ g_free(payload); ++ payload = g_malloc0(sizeof(MsgPktHeader) + plen); ++ if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, ++ payload, sizeof(MsgPktHeader) + plen, MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ if (msgq_handlers[msg_code]) { ++ msgq_handlers[msg_code](s, sqe, payload); ++ } else { ++ qemu_log("current cannot support process msg code: %u.\n", msg_code); ++ } ++ g_free(payload); ++} ++ ++static void handle_task_type_enum(BusControllerState *s, HiMsgSqe *sqe) ++{ ++ EnumPktHeader *payload = NULL; ++ EnumPldScanHeader *scan_header = NULL; ++ uint32_t p_addr = sqe->p_addr; ++ uint32_t header_size; ++ ++ if (p_addr + HI_MSG_SQE_PLD_SIZE > s->msgq.sq_sz) { ++ qemu_log("invalid p_addr %u, total size %ld\n", ++ p_addr, s->msgq.sq_sz); ++ return; ++ } ++ ++ scan_header = g_malloc0(sizeof(EnumPldScanHeader)); ++ if (dma_memory_read(&address_space_memory, 
++ s->msgq.sq_base_addr_gpa + p_addr + ENUM_PKT_HEADER_SIZE, ++ scan_header, sizeof(EnumPldScanHeader), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ g_free(scan_header); ++ return; ++ } ++ header_size = ENUM_PKT_HEADER_SIZE + calc_enum_pld_header_size(scan_header, true) + ENUM_TOPO_QUERY_REQ_SIZE; ++ g_free(scan_header); ++ payload = g_malloc0(header_size); ++ if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, ++ payload, header_size, MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ handle_msg_enum(s, sqe, payload); ++ g_free(payload); ++} ++ ++static void handle_eu_table_cfg_cmd(BusControllerState *s, HiMsgSqe *sqe, void *payload) ++{ ++ HiEuCfgReq *req = (HiEuCfgReq *)payload; ++ HiEuCfgRsp rsp; ++ HiMsgCqe cqe; ++ ++ /* qemu do nothing for hisi_private msg, just mask the msg return success */ ++ trace_handle_eu_table_cfg_cmd(req->eu_msg_code, req->cfg_entry_num, ++ req->tbl_cfg_mode, req->tbl_cfg_status, ++ req->entry_start_id, req->eid, req->upi); ++ ++ memset(&rsp, 0, sizeof(rsp)); ++ rsp.tbl_cfg_status = EU_CFG_SUCCESS; ++ ++ memset(&cqe, 0, sizeof(cqe)); ++ cqe.opcode = EU_TABLE_CFG_CMD; ++ cqe.task_type = HISI_PRIVATE; ++ cqe.msn = sqe->msn; ++ cqe.p_len = sizeof(rsp); ++ cqe.status = CQE_SUCCESS; ++ cqe.rq_pi = fill_rq(s, &rsp, sizeof(rsp)); ++ (void)fill_cq(s, &cqe); ++} ++ ++static void (*hisi_private_handlers[])(BusControllerState *s, HiMsgSqe *sqe, void *payload) = { ++ [CC_CTX_CFG_CMD] = NULL, ++ [QUERY_UB_MEM_ROUTE_CMD] = NULL, ++ [EU_TABLE_CFG_CMD] = handle_eu_table_cfg_cmd, ++ [CC_CTX_QUERY_CMD] = NULL, ++}; ++ ++static void handle_task_type_hisi_private(BusControllerState *s, HiMsgSqe *sqe) ++{ ++ HiEuCfgReq *payload = NULL; ++ uint8_t opcode = sqe->opcode; ++ uint32_t p_addr = sqe->p_addr; ++ ++ if (opcode >= ARRAY_SIZE(hisi_private_handlers)) { ++ qemu_log("invalid msg code %u, array size %lu\n", ++ opcode, ARRAY_SIZE(hisi_private_handlers)); ++ return; ++ } ++ ++ if (p_addr + HI_MSG_SQE_PLD_SIZE > s->msgq.sq_sz) { ++ qemu_log("invalid p_addr %u, total size %ld\n", ++ p_addr, s->msgq.sq_sz); ++ return; ++ } ++ ++ payload = g_malloc0(sizeof(HiEuCfgReq)); ++ if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + p_addr, ++ payload, sizeof(HiEuCfgReq), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa entry\n"); ++ g_free(payload); ++ return; ++ } ++ ++ if (hisi_private_handlers[opcode]) { ++ hisi_private_handlers[opcode](s, sqe, payload); ++ } else { ++ qemu_log("current cannot support process hisi private opcode: %u.\n", opcode); ++ } ++ g_free(payload); ++} ++ ++void msgq_process_task(void *opaque, uint64_t val) ++{ ++ BusControllerState *s = opaque; ++ HiMsgSqe *sqe = NULL; ++ uint16_t i; ++ uint16_t cnt; ++ uint32_t ci = ub_get_long(s->msgq_reg + SQ_CI); ++ uint32_t pi = ub_get_long(s->msgq_reg + SQ_PI); ++ uint32_t depth = ub_get_long(s->msgq_reg + SQ_DEPTH); ++ ++ if (!s->msgq.sq_base_addr_gpa) { ++ /* not ready */ ++ return; ++ } ++ ++ if (depth > HI_MSGQ_MAX_DEPTH || depth < HI_MSGQ_MIN_DEPTH || ci >= depth || pi >= depth) { ++ qemu_log("Invalid arguments: ci=%u pi=%u depth=%u\n", ci, pi, depth); ++ return; ++ } ++ ++ sqe = g_malloc0(sizeof(HiMsgSqe)); ++ cnt = (pi + depth - ci) % depth; ++ for (i = 0; i < cnt; i++) { ++ if (dma_memory_read(&address_space_memory, s->msgq.sq_base_addr_gpa + ci, ++ sqe, sizeof(HiMsgSqe), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("Fail to read sq_base_addr_gpa 
entry\n"); ++ g_free(sqe); ++ return; ++ } ++ if (sqe->msg_code >= (ARRAY_SIZE(msgq_handlers))) { ++ qemu_log("invalid msg code %u, array size %lu\n", ++ sqe->msg_code, ARRAY_SIZE(msgq_handlers)); ++ g_free(sqe); ++ return; ++ } ++ ++ switch (sqe->task_type) { ++ case PROTOCOL_MSG: ++ handle_task_type_msg(s, sqe); ++ break; ++ case PROTOCOL_ENUM: ++ handle_task_type_enum(s, sqe); ++ break; ++ case HISI_PRIVATE: ++ handle_task_type_hisi_private(s, sqe); ++ break; ++ default: ++ qemu_log("current can not process task type: %u\n", sqe->task_type); ++ break; ++ } ++ ci = (ci + 1) % depth; ++ } ++ ub_set_long(s->msgq_reg + SQ_CI, ci); ++ g_free(sqe); ++} ++ ++void msgq_sq_init(void *opaque) ++{ ++ BusControllerState *s = opaque; ++ uint32_t addr_l = ub_get_long(s->msgq_reg + SQ_ADDR_L); ++ uint32_t addr_h = ub_get_long(s->msgq_reg + SQ_ADDR_H); ++ uint32_t depth = ub_get_long(s->msgq_reg + SQ_DEPTH); ++ uint64_t size = (uint64_t)depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE); ++ ++ s->msgq.sq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); ++ s->msgq.sq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.sq_base_addr_gpa, &size, true); ++ if (size != depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE)) { ++ qemu_log("sq size %lu != %lu, depth=%u\n", size, ++ depth * (HI_MSG_SQE_SIZE + HI_MSG_SQE_PLD_SIZE), depth); ++ return; ++ } ++ s->msgq.sq_sz = size; ++ trace_msgq_sq_init(s->msgq.sq_base_addr_gpa, s->msgq.sq_base_addr_hva, depth); ++} ++ ++void msgq_cq_init(void *opaque) ++{ ++ BusControllerState *s = opaque; ++ uint32_t addr_l = ub_get_long(s->msgq_reg + CQ_ADDR_L); ++ uint32_t addr_h = ub_get_long(s->msgq_reg + CQ_ADDR_H); ++ uint32_t depth = ub_get_long(s->msgq_reg + CQ_DEPTH); ++ uint64_t size = (uint64_t)depth * HI_MSG_CQE_SIZE; ++ ++ s->msgq.cq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); ++ s->msgq.cq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.cq_base_addr_gpa, &size, true); ++ if (size != depth * HI_MSG_CQE_SIZE) { ++ qemu_log("cq size %lu != %lu, depth=%u\n", size, ++ depth * HI_MSG_CQE_SIZE, depth); ++ return; ++ } ++ s->msgq.cq_sz = size; ++ trace_msgq_cq_init(s->msgq.cq_base_addr_gpa, s->msgq.cq_base_addr_hva, depth); ++} ++ ++void msgq_rq_init(void *opaque) ++{ ++ BusControllerState *s = opaque; ++ uint32_t addr_l = ub_get_long(s->msgq_reg + RQ_ADDR_L); ++ uint32_t addr_h = ub_get_long(s->msgq_reg + RQ_ADDR_H); ++ uint32_t depth = ub_get_long(s->msgq_reg + RQ_DEPTH); ++ uint64_t size = (uint64_t)depth * HI_MSG_RQE_SIZE; ++ ++ s->msgq.rq_base_addr_gpa = addr_l | ((uint64_t)addr_h << 32); ++ s->msgq.rq_base_addr_hva = (uint64_t)cpu_physical_memory_map(s->msgq.rq_base_addr_gpa, &size, true); ++ if (size != depth * HI_MSG_RQE_SIZE) { ++ qemu_log("rq size %lu != %u, depth=%u\n", size, ++ depth * HI_MSG_RQE_SIZE, depth); ++ return; ++ } ++ s->msgq.rq_sz = size; ++ trace_msgq_rq_init(s->msgq.rq_base_addr_gpa, s->msgq.rq_base_addr_hva, depth); ++} ++ ++void msgq_handle_rst(void *opaque) ++{ ++ BusControllerState *s = opaque; ++ uint32_t old = ub_get_long(s->msgq_reg + SQ_CI); ++ ++ qemu_log("BusControllerState receive reset event, " ++ "clear SQ_CI(%u -> 0).\n", old); ++ ub_set_long(s->msgq_reg + SQ_CI, 0); ++ ub_set_long(s->msgq_reg + SQ_ADDR_L, 0); ++ ub_set_long(s->msgq_reg + SQ_ADDR_H, 0); ++ ub_set_long(s->msgq_reg + SQ_DEPTH, 0); ++ ub_set_long(s->msgq_reg + CQ_PI, 0); ++ ub_set_long(s->msgq_reg + CQ_ADDR_L, 0); ++ ub_set_long(s->msgq_reg + CQ_ADDR_H, 0); ++ ub_set_long(s->msgq_reg + CQ_DEPTH, 0); ++ ub_set_long(s->msgq_reg + RQ_PI, 0); ++ 
ub_set_long(s->msgq_reg + RQ_ADDR_L, 0); ++ ub_set_long(s->msgq_reg + RQ_ADDR_H, 0); ++ ub_set_long(s->msgq_reg + RQ_DEPTH, 0); ++ ++ if (s->msgq.rq_sz && s->msgq.rq_base_addr_hva) { ++ cpu_physical_memory_unmap((void *)s->msgq.rq_base_addr_hva, ++ s->msgq.rq_sz, true, s->msgq.rq_sz); ++ } ++ if (s->msgq.sq_sz && s->msgq.sq_base_addr_hva) { ++ cpu_physical_memory_unmap((void *)s->msgq.sq_base_addr_hva, ++ s->msgq.sq_sz, true, s->msgq.sq_sz); ++ } ++ if (s->msgq.cq_sz && s->msgq.cq_base_addr_hva) { ++ cpu_physical_memory_unmap((void *)s->msgq.cq_base_addr_hva, ++ s->msgq.cq_sz, true, s->msgq.cq_sz); ++ } ++ memset(&s->msgq, 0, sizeof(s->msgq)); ++} +\ No newline at end of file +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index ffa135dacf..4344a45741 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -4,6 +4,9 @@ ub_ss.add(files( + 'ub_ubc.c', + 'ub_config.c', + 'ub_acpi.c', ++ 'ub_enum.c', ++ 'ub_common.c', ++ 'ub_sec.c', + )) + system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss) + subdir('hisi') +diff --git a/hw/ub/ub_common.c b/hw/ub/ub_common.c +new file mode 100644 +index 0000000000..3f8dff2a45 +--- /dev/null ++++ b/hw/ub/ub_common.c +@@ -0,0 +1,89 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include "qemu/osdep.h" ++#include "hw/arm/virt.h" ++#include "qemu/log.h" ++#include "qapi/error.h" ++#include "hw/ub/ub_common.h" ++#include "sysemu/dma.h" ++ ++/* tmp for vfio-ub run with stub, remove later */ ++ ++uint32_t fill_rq(BusControllerState *s, void *rsp, uint32_t rsp_size) ++{ ++ uint32_t ci = ub_get_long(s->msgq_reg + RQ_CI); ++ uint32_t pi = ub_get_long(s->msgq_reg + RQ_PI); ++ uint32_t pi_new; ++ uint32_t depth = ub_get_long(s->msgq_reg + RQ_DEPTH); ++ uint32_t remain; ++ hwaddr dst_rqe; ++ ++ if (!s->msgq.rq_base_addr_gpa) { ++ qemu_log("rq_base_addr_gpa is NULL\n"); ++ return UINT32_MAX; ++ } ++ ++ if (depth > HI_MSGQ_MAX_DEPTH || depth < HI_MSGQ_MIN_DEPTH || ci >= depth || pi >= depth) { ++ qemu_log("Invalid RQ indices: ci=%u pi=%u depth=%u\n", ci, pi, depth); ++ return UINT32_MAX; ++ } ++ ++ remain = depth - (pi + depth - ci) % depth; ++ if (remain < 1) { ++ qemu_log("RQ is full! 
depth=%u ci=%u pi=%u\n", depth, ci, pi); ++ return UINT32_MAX; ++ } ++ ++ dst_rqe = (uint64_t)((uint8_t *)s->msgq.rq_base_addr_gpa + pi * HI_MSG_RQE_SIZE); ++ dma_memory_write(&address_space_memory, dst_rqe, rsp, rsp_size, ++ MEMTXATTRS_UNSPECIFIED); ++ pi_new = (pi + DIV_ROUND_UP((rsp_size), HI_MSG_RQE_SIZE)) % depth; ++ ub_set_long(s->msgq_reg + RQ_PI, pi_new); ++ return pi; ++} ++ ++uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe) ++{ ++ uint32_t ci = ub_get_long(s->msgq_reg + CQ_CI); ++ uint32_t pi = ub_get_long(s->msgq_reg + CQ_PI); ++ uint32_t depth = ub_get_long(s->msgq_reg + CQ_DEPTH); ++ uint32_t remain; ++ hwaddr dst_cqe; ++ ++ if (depth > HI_MSGQ_MAX_DEPTH || depth < HI_MSGQ_MIN_DEPTH || ci >= depth || pi >= depth) { ++ qemu_log("Invalid CQ indices: ci=%u pi=%u depth=%u\n", ci, pi, depth); ++ return UINT32_MAX; ++ } ++ ++ if (!s->msgq.cq_base_addr_gpa) { ++ qemu_log("sq_base_addr_gpa is NULL\n"); ++ return UINT32_MAX; ++ } ++ ++ remain = depth - (pi + depth - ci) % depth; ++ if (remain <= 1) { ++ qemu_log("CQ is full! depth=%u ci=%u pi=%u\n", depth, ci, pi); ++ return UINT32_MAX; ++ } ++ ++ dst_cqe = (uint64_t)((HiMsgCqe *)s->msgq.cq_base_addr_gpa + pi); ++ dma_memory_write(&address_space_memory, dst_cqe, cqe, ++ sizeof(HiMsgCqe), MEMTXATTRS_UNSPECIFIED); ++ ub_set_long(s->msgq_reg + CQ_PI, ++pi % depth); ++ ++ return pi; ++} +\ No newline at end of file +diff --git a/hw/ub/ub_config.c b/hw/ub/ub_config.c +index 32ae6b91e4..25307cba19 100644 +--- a/hw/ub/ub_config.c ++++ b/hw/ub/ub_config.c +@@ -130,4 +130,8 @@ uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success) + } + + return emulate_offset; ++} ++ ++void handle_msg_cfg(void *opaque, HiMsgSqe *sqe, void *payload) ++{ + } +\ No newline at end of file +diff --git a/hw/ub/ub_enum.c b/hw/ub/ub_enum.c +new file mode 100644 +index 0000000000..0419e2f295 +--- /dev/null ++++ b/hw/ub/ub_enum.c +@@ -0,0 +1,31 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include "qemu/osdep.h" ++#include "hw/qdev-properties.h" ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_config.h" ++#include "hw/ub/ub_bus.h" ++#include "hw/ub/ub_ubc.h" ++#include "hw/ub/ub_enum.h" ++#include "qemu/log.h" ++#include "qapi/error.h" ++#include "trace.h" ++ ++void handle_msg_enum(void *opaque, HiMsgSqe *sqe, void *payload) ++{ ++} +\ No newline at end of file +diff --git a/hw/ub/ub_sec.c b/hw/ub/ub_sec.c +new file mode 100644 +index 0000000000..4939e76925 +--- /dev/null ++++ b/hw/ub/ub_sec.c +@@ -0,0 +1,24 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "hw/ub/ub_sec.h" ++#include "qemu/log.h" ++ ++void handle_msg_sec(void *opaque, HiMsgSqe *sqe, void *payload) ++{ ++} +\ No newline at end of file +diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c +index d28ba2024f..6d2441f380 100644 +--- a/hw/ub/ub_ubc.c ++++ b/hw/ub/ub_ubc.c +@@ -75,6 +75,27 @@ static void ub_msgq_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned + qemu_log("invalid argument len 0x%x val 0x%lx\n", len, val); + return; + } ++ ++ /* only support 1 queue */ ++ switch (addr) { ++ case SQ_PI: ++ msgq_process_task(s, val); ++ break; ++ case SQ_ADDR_H: ++ msgq_sq_init(s); ++ break; ++ case CQ_ADDR_H: ++ msgq_cq_init(s); ++ break; ++ case RQ_ADDR_H: ++ msgq_rq_init(s); ++ break; ++ case MSGQ_RST: ++ msgq_handle_rst(s); ++ break; ++ default: ++ break; ++ } + } + + static const MemoryRegionOps ub_msgq_reg_ops = { +diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h +index f9201741a9..dc923f3a13 100644 +--- a/include/hw/ub/hisi/ubc.h ++++ b/include/hw/ub/hisi/ubc.h +@@ -399,4 +399,9 @@ typedef struct HiEuCfgPld { + }; + } HiEuCfgPld; + ++void msgq_sq_init(void *opaque); ++void msgq_cq_init(void *opaque); ++void msgq_rq_init(void *opaque); ++void msgq_process_task(void *opaque, uint64_t val); ++void msgq_handle_rst(void *opaque); + #endif +diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h +index b8a0287e56..840052931e 100644 +--- a/include/hw/ub/ub_common.h ++++ b/include/hw/ub/ub_common.h +@@ -413,4 +413,7 @@ typedef struct MsgPktHeader { /* TODO, check byte order */ + } MsgPktHeader; + #define MSG_PKT_HEADER_SIZE 32 + ++uint32_t fill_rq(BusControllerState *s, void *rsp, uint32_t rsp_size); ++uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe); ++ + #endif +diff --git a/include/hw/ub/ub_enum.h b/include/hw/ub/ub_enum.h +new file mode 100644 +index 0000000000..8697a8de4b +--- /dev/null ++++ b/include/hw/ub/ub_enum.h +@@ -0,0 +1,217 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef UB_ENUM_H ++#define UB_ENUM_H ++ ++#include "hw/ub/hisi/ubc.h" ++#include "hw/ub/ub.h" ++#include "hw/ub/ub_common.h" ++#include "qemu/log.h" ++ ++enum UbEnumSubMsgCode { ++ UB_ENUM_QUERY_REQ = 0x0, ++ UB_ENUM_QUERY_RSP = 0x1, ++ UB_ENUM_CNA_CONFIG_REQ = 0x2, ++ UB_ENUM_CNA_CONFIG_RSP = 0x3 ++}; ++ ++enum EnumCMD { ++ ENUM_CMD_TOPO_QUERY = 0, ++ ENUM_CMD_CNA_CFG, ++ ENUM_CMD_CNA_QUERY ++}; ++ ++enum UbEnumTopoQueryOpcode { ++ UB_ENUM_TOPO_QUERY_RSP = 0x0, ++ UB_ENUM_TOPO_QUERY_REQ = 0x1 ++}; ++ ++ ++/* ++ * enum pkt : EnumPktHeader + EnumPldScanHeader + reqX ++ * reqX : EnumTopoQueryReq or EnumCnaCfgReq or EnumNaQueryReq ++ */ ++typedef struct EnumPktHeader { ++ /* DW0 */ ++ struct UbLinkHeader ulh; ++ /* DW1-DW2 */ ++ struct ClanNetworkHeader cnth; ++ /* DW3 */ ++ uint16_t rsv; ++#define UB_CP_UPI 0x7FFF /* id = ~0, permission = 0 */ ++ uint16_t upi; ++ ++ /* DW4~ */ ++ char payload[0]; ++} EnumPktHeader; ++#define ENUM_PKT_HEADER_SIZE 16 ++ ++typedef struct EnumPldScanHeader { ++ /* DW0 */ ++ union { ++ struct { ++ uint32_t step : 8; ++ uint32_t hops : 8; ++ uint32_t hop_type : 4; ++ uint32_t r : 1; ++ uint32_t rsv : 11; ++ } bits; ++ uint32_t dw0; ++ }; ++ /* DW1~ */ ++ uint8_t path[]; /* include forward & return, 4byte align */ ++} EnumPldScanHeader; ++#define ENUM_PLD_SCAN_HEADER_BASE_SIZE 4 /* exclusive path */ ++ ++typedef struct EnumPldScanPduCommon { ++ /* DW0 */ ++ union { ++ struct { ++ union { ++ uint8_t status; ++ uint8_t slice_id; ++ }; ++ uint8_t opcode; ++ uint8_t cmd; ++#define UB_ENUM_MNG_VERSION 0x1 ++ uint8_t version; ++ } bits; ++ uint32_t dw0; ++ }; ++ /* DW1 */ ++ uint32_t msn : 16; ++ uint32_t pdu_len : 8; ++ uint32_t msgq_id : 8; ++ /* DW2~DW5 */ ++ UbGuid guid; ++} EnumPldScanPduCommon; ++#define ENUM_PLD_SCAN_PDU_COMMON_SIZE 24 ++ ++typedef struct EnumTopoQueryReq { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++} EnumTopoQueryReq; ++#define ENUM_TOPO_QUERY_REQ_SIZE 24 ++ ++/* enum query respons message */ ++typedef struct EnumTlvPortInfo { ++ /* DW0 */ ++ union { ++ struct { ++ uint32_t rsvd : 8; ++ uint32_t s : 1; ++ uint32_t b : 1; ++ uint32_t w : 1; ++ uint32_t t : 1; ++ uint32_t rsvd1 : 4; ++ uint32_t len : 8; ++ uint32_t type : 8; ++ } bits0; ++ uint32_t dw0; ++ }; ++ /* DW1 */ ++ uint16_t remote_port_idx; ++ uint16_t local_port_idx; ++ ++ /* DW2 */ ++ uint16_t cur_rate; ++ uint16_t max_rate; ++ ++ /* DW3~DW6 */ ++ UbGuid remote_guid; ++} EnumTlvPortInfo; ++#define ENUM_TOPO_QUERY_RSP_PORT_SIZE 28 ++#define UB_PORT_STATUS_UP 1 ++#define UB_PORT_STATUS_DOWN 0 ++ ++typedef struct EnumTlvPortNum { ++ uint32_t total_num_ports : 16; ++ uint32_t len : 8; ++ uint32_t type : 8; ++ uint32_t rsvd; ++} EnumTlvPortNum; ++#define ENUM_TLV_PORT_NUM_SZ 8 ++ ++typedef struct EnumTlvSliceInfo { ++ uint32_t rsvd : 8; ++ uint32_t total_slice : 8; ++ uint32_t len : 8; ++ uint32_t type : 8; ++} EnumTlvSliceInfo; ++#define ENUM_TLV_SLICE_INFO_SZ 4 ++ ++typedef struct EnumTlvCapInfo { ++ uint32_t da : 1; ++ uint32_t rsvd0 : 7; ++ uint32_t mtu : 3; ++ uint32_t rsvd1 : 1; ++ uint32_t sup_mtu : 3; ++ uint32_t rsvd2 : 1; ++ uint32_t len : 8; ++ uint32_t type : 8; ++ uint32_t class_code : 16; ++ uint32_t rsvd3 : 16; ++} EnumTlvCapInfo; ++#define ENUM_TLV_CAP_INFO_SZ 8 ++ ++typedef struct EnumTopoQueryRspPdu { ++ /* DW0~DW5 */ ++ struct EnumPldScanPduCommon common; ++ struct EnumTlvPortInfo port_info[0]; // TODO: size depends on num of port ++} EnumTopoQueryRspPdu; ++#define ENUM_TOPO_QUERY_RSP_BASE_SIZE 40 /* exclusive port_info */ ++#define 
ENUM_TOPO_QUERY_RSP_SIZE sizeof(EnumTopoQueryRsp) ++#define ENUM_TOPO_QUERY_MAX_RSP_SIZE HI_MSG_RQE_SIZE ++#define ENUM_TOPO_QUERY_RSP_PDU_MAX_LEN 256 ++ ++typedef struct EnumTopoQueryRsp { ++ EnumPktHeader pkt_hdr; ++ EnumPldScanHeader scan_hdr; ++ EnumTopoQueryRspPdu scan_pdu; ++} EnumTopoQueryRsp; ++ ++void handle_msg_enum(void *opaque, HiMsgSqe *sqe, void *payload); ++static inline size_t calc_forward_path_size(struct EnumPldScanHeader *header) ++{ ++#define FOUR_BITS_PER_DWORD 8 ++#define ALIGN(a, b) (((a) + ((b) - 1)) & ~((b) - 1)) ++ uint8_t hop_bits[] = { 0x00000004, 0x00000008, 0x00000010 }; ++ ++ if (header->bits.hop_type >= ARRAY_SIZE(hop_bits)) { ++ qemu_log("enum hop_type error, hop_type is %u, hop_bits size is %zu\n", ++ header->bits.hop_type, ARRAY_SIZE(hop_bits)); ++ return 0; ++ } ++ ++ /* Path size is hops * hop_bits[], then align it to 4byte */ ++ return ALIGN(hop_bits[header->bits.hop_type] * header->bits.hops / ++ hop_bits[0], FOUR_BITS_PER_DWORD) / 0x00000002; ++} ++ ++static inline size_t calc_enum_pld_header_size(EnumPldScanHeader *header, bool req) ++{ ++ size_t bytes = calc_forward_path_size(header); ++ ++ if (req && header->bits.r) ++ bytes <<= 1; ++ ++ bytes += ENUM_PLD_SCAN_HEADER_BASE_SIZE; ++ ++ return bytes; ++} ++#endif +\ No newline at end of file +diff --git a/include/hw/ub/ub_pool.h b/include/hw/ub/ub_pool.h +new file mode 100644 +index 0000000000..85176794f9 +--- /dev/null ++++ b/include/hw/ub/ub_pool.h +@@ -0,0 +1,29 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++ ++#ifndef UB_POOL_H ++#define UB_POOL_H ++ ++enum ub_pool_sub_msg_code { ++ UB_DEV_REG, ++ UB_DEV_RLS, ++ UB_BI_CREATE, ++ UB_BI_DESTROY, ++ UB_CFG_CPL_NOTIFY, ++}; ++ ++#endif +\ No newline at end of file +diff --git a/include/hw/ub/ub_sec.h b/include/hw/ub/ub_sec.h +new file mode 100644 +index 0000000000..000cf83812 +--- /dev/null ++++ b/include/hw/ub/ub_sec.h +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . 
++ */ ++ ++#ifndef UB_SEC_H ++#define UB_SEC_H ++#include "hw/ub/hisi/ubc.h" ++#include "hw/qdev-core.h" ++#include "hw/ub/ub_common.h" ++ ++void handle_msg_sec(void *opaque, HiMsgSqe *sqe, void *payload); ++ ++#endif +\ No newline at end of file +diff --git a/include/hw/ub/ub_ubc.h b/include/hw/ub/ub_ubc.h +index fe86a1e34f..86ce83a7c5 100644 +--- a/include/hw/ub/ub_ubc.h ++++ b/include/hw/ub/ub_ubc.h +@@ -52,6 +52,7 @@ struct BusControllerState { + MemoryRegion io_mmio; /* ub mmio hpa memory region */ + uint32_t mmio_size; + bool mig_enabled; ++ HiMsgqInfo msgq; + BusControllerDev *ubc_dev; + UBBus *bus; + QLIST_ENTRY(BusControllerState) node; +-- +2.33.0 + diff --git a/ub-support-ummu-dma-address-translate.patch b/ub-support-ummu-dma-address-translate.patch new file mode 100644 index 0000000000000000000000000000000000000000..f3aab553436228c30a25be88620914815dd6bf11 --- /dev/null +++ b/ub-support-ummu-dma-address-translate.patch @@ -0,0 +1,687 @@ +From 91cabe631eb0f81f728425edc0438ebc2a46b6a9 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 20:42:19 +0800 +Subject: [PATCH 5/6] ub: support ummu dma address translate + +support ummu stage-1 addr translate + +Signed-off-by: caojinhuahw +--- + hw/ub/meson.build | 1 + + hw/ub/ub.c | 12 + + hw/ub/ub_ummu.c | 505 +++++++++++++++++++++++++++++++++++++++++ + hw/ub/ub_usi.c | 38 ++++ + include/hw/ub/ub.h | 2 + + include/hw/ub/ub_usi.h | 2 + + 6 files changed, 560 insertions(+) + create mode 100644 hw/ub/ub_usi.c + +diff --git a/hw/ub/meson.build b/hw/ub/meson.build +index 400fa553d8..2991d45416 100644 +--- a/hw/ub/meson.build ++++ b/hw/ub/meson.build +@@ -7,6 +7,7 @@ ub_ss.add(files( + 'ub_acpi.c', + 'ub_enum.c', + 'ub_common.c', ++ 'ub_usi.c', + 'ub_cna_mgmt.c', + 'ub_sec.c', + )) +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 7a171c787b..6d42abfe27 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -945,3 +945,15 @@ void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque) + bus->iommu_ops = ops; + bus->iommu_opaque = opaque; + } ++ ++uint32_t ub_dev_get_token_id(UBDevice *udev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_DEV_TOKEN_ID_OFFSET, true); ++ return *(uint32_t *)(udev->config + offset); ++} ++ ++uint32_t ub_dev_get_ueid(UBDevice *udev) ++{ ++ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_DEV_UEID_OFFSET, true); ++ return *(uint32_t *)(udev->config + offset); ++} +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index db5e6583f8..a7a4a33af3 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -71,6 +71,33 @@ static const char *const mcmdq_cmd_strings[MCMDQ_CMD_MAX] = { + [CMD_TLBI_S2_IPA_U] = "CMD_TLBI_S2_IPA_U", + }; + ++static const char *const ummu_event_type_strings[EVT_MAX] = { ++ [EVT_NONE] = "EVT_NONE", ++ [EVT_UT] = "EVT_UT", ++ [EVT_BAD_DSTEID] = "EVT_BAD_DSTEID", ++ [EVT_TECT_FETCH] = "EVT_TECT_FETCH", ++ [EVT_BAD_TECT] = "EVT_BAD_TECT", ++ [EVT_RESERVE_0] = "EVT_RESERVE_0", ++ [EVT_BAD_TOKENID] = "EVT_BAD_TOKENID", ++ [EVT_TCT_FETCH] = "EVT_TCT_FETCH", ++ [EVT_BAD_TCT] = "EVT_BAD_TCT", ++ [EVT_A_PTW_EABT] = "EVT_A_PTW_EABT", ++ [EVT_A_TRANSLATION] = "EVT_A_TRANSLATION", ++ [EVT_A_ADDR_SIZE] = "EVT_A_ADDR_SIZE", ++ [EVT_ACCESS] = "EVT_ACCESS", ++ [EVT_A_PERMISSION] = "EVT_A_PERMISSION", ++ [EVT_TBU_CONFLICT] = "EVT_TBU_CONFLICT", ++ [EVT_CFG_CONFLICT] = "EVT_CFG_CONFLICT", ++ [EVT_VMS_FETCH] = "EVT_VMS_FETCH", ++ [EVT_P_PTW_EABT] = "EVT_P_PTW_EABT", ++ [EVT_P_CFG_ERROR] = "EVT_P_CFG_ERROR", ++ [EVT_P_PERMISSION] = "EVT_P_PERMISSION", ++ [EVT_RESERVE_1] = 
"EVT_RESERVE_1", ++ [EVT_EBIT_DENY] = "EVT_EBIT_DENY", ++ [EVT_CREATE_DSTEID_TECT_RELATION_RESULT] = "EVT_CREATE_DSTEID_TECT_RELATION_RESULT", ++ [EVT_DELETE_DSTEID_TECT_RELATION_RESULT] = "EVT_DELETE_DSTEID_TECT_RELATION_RESULT" ++}; ++ + QLIST_HEAD(, UMMUState) ub_umms; + UMMUState *ummu_find_by_bus_num(uint8_t bus_num) + { +@@ -389,6 +416,19 @@ static void mcmdq_cmd_sync_sev_irq(void) + qemu_log("cannot support CMD_SYNC SEV event.\n"); + } + ++static void ummu_glb_usi_notify(UMMUState *u, UMMUUSIVectorType type) ++{ ++ USIMessage msg; ++ ++ if (type == UMMU_USI_VECTOR_GERROR) { ++ msg = ummu_get_gerror_usi_message(u); ++ } else { ++ msg = ummu_get_eventq_usi_message(u); ++ } ++ ++ usi_send_message(&msg, UMMU_INTERRUPT_ID, NULL); ++} ++ + static void mcmdq_cmd_sync_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + uint32_t cm = CMD_SYNC_CM(cmd); +@@ -1624,6 +1664,7 @@ static void ummu_base_realize(DeviceState *dev, Error **errp) + ub_save_ummu_list(u); + + u->ummu_devs = g_hash_table_new_full(NULL, NULL, NULL, g_free); ++ u->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free); + QLIST_INIT(&u->kvtbl); + if (u->primary_bus) { + ub_setup_iommu(u->primary_bus, &ummu_ops, u); +@@ -1632,6 +1673,7 @@ static void ummu_base_realize(DeviceState *dev, Error **errp) + } + + u->tecte_tag_num = 0; ++ u->mrtypename = TYPE_UMMU_IOMMU_MEMORY_REGION; + if (u->nested) { + memory_region_init(&u->stage2, OBJECT(u), "stage2", UINT64_MAX); + memory_region_init_alias(&u->sysmem, OBJECT(u), +@@ -1662,6 +1704,12 @@ static void ummu_base_unrealize(DeviceState *dev) + u->ummu_devs = NULL; + } + ++ if (u->configs) { ++ g_hash_table_remove_all(u->configs); ++ g_hash_table_destroy(u->configs); ++ u->configs = NULL; ++ } ++ + QLIST_FOREACH_SAFE(entry, &u->kvtbl, list, next_entry) { + QLIST_REMOVE(entry, list); + g_free(entry); +@@ -1719,6 +1767,25 @@ static int ummu_get_tecte(UMMUState *ummu, dma_addr_t addr, TECTE *tecte) + return 0; + } + ++static uint32_t ummu_get_tecte_tag_by_dest_eid(UMMUState *u, uint32_t dst_eid) ++{ ++ UMMUKVTblEntry *entry = NULL; ++ ++ QLIST_FOREACH(entry, &u->kvtbl, list) { ++ if (entry->dst_eid == dst_eid) { ++ break; ++ } ++ } ++ ++ if (!entry) { ++ qemu_log("cannot find tecte_tag by dst_eid 0x%x\n", dst_eid); ++ return UINT32_MAX; ++ } ++ qemu_log("success get tecte_tag(0x%x) by dst_eid(0x%x)\n", entry->tecte_tag, dst_eid); ++ ++ return entry->tecte_tag; ++} ++ + static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte) + { + dma_addr_t tect_base_addr = TECT_BASE_ADDR(ummu->tect_base); +@@ -1768,6 +1835,429 @@ static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte) + return 0; + } + ++static int ummu_decode_tecte(UMMUState *ummu, UMMUTransCfg *cfg, ++ TECTE *tecte, UMMUEventInfo *event) ++{ ++ if (TECTE_VALID(tecte) == 0) { ++ qemu_log("fetched tecte is invalid\n"); ++ return -EINVAL; ++ } ++ ++ cfg->tct_ptr = TECTE_TCT_PTR(tecte); ++ cfg->tct_num = TECTE_TCT_NUM(tecte); ++ cfg->tct_fmt = TECTE_TCT_FMT(tecte); ++ ++ qemu_log("tct_ptr: 0x%lx, tct_num: %lu, fmt: %lu\n", ++ cfg->tct_ptr, cfg->tct_num, cfg->tct_fmt); ++ return 0; ++} ++ ++static int ummu_get_tcte(UMMUState *ummu, dma_addr_t addr, ++ TCTE *tcte, uint32_t tid) ++{ ++ int ret, i; ++ uint64_t *_tcte; ++ ++ ret = dma_memory_read(&address_space_memory, addr, tcte, sizeof(*tcte), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("Cannot fetch tcte at address=0x%lx\n", addr); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(tcte->word); i++) { ++ 
le32_to_cpus(&tcte->word[i]); ++ } ++ ++ _tcte = (uint64_t *)tcte; ++ qemu_log("fetch tcte(%u): <0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx>\n", ++ tid, _tcte[0], _tcte[1], _tcte[2], _tcte[3], _tcte[4], _tcte[5], _tcte[6], _tcte[7]); ++ return 0; ++} ++ ++static int ummu_find_tcte(UMMUState *ummu, UMMUTransCfg *cfg, uint32_t tid, ++ TCTE *tcte, UMMUEventInfo *event) ++{ ++ int l1idx, l2idx; ++ dma_addr_t tct_lv1_addr, tcte_addr; ++ TCTEDesc tct_desc; ++ int ret, i; ++ ++ if (cfg->tct_num == 0 || tid >= TCTE_MAX_NUM(cfg->tct_num)) { ++ event->type = EVT_BAD_TOKENID; ++ return -EINVAL; ++ } ++ ++ if (TCT_FMT_LINEAR == cfg->tct_fmt || TCT_FMT_LVL2_4K == cfg->tct_fmt) { ++ event->type = EVT_TCT_FETCH; ++ qemu_log("current dont support TCT_FMT_LINEAR&TCT_FMT_LVL2_4K.\n"); ++ return -EINVAL; ++ } ++ ++ l1idx = tid >> TCT_SPLIT_64K; ++ tct_lv1_addr = cfg->tct_ptr + l1idx * sizeof(tct_desc); ++ ret = dma_memory_read(&address_space_memory, tct_lv1_addr, &tct_desc, sizeof(tct_desc), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ event->type = EVT_TCT_FETCH; ++ qemu_log("failed to dma read tct lv1 entry.\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(tct_desc.word); i++) { ++ le32_to_cpus(&tct_desc.word[i]); ++ } ++ ++ qemu_log("l1idx: %d, tct_l1_addr: 0x%lx, tct_desc: 0x%lx, tcte_ptr: 0x%llx, l1tcte_v: %u\n", ++ l1idx, tct_lv1_addr, *(uint64_t *)&tct_desc, TCT_L2TCTE_PTR(&tct_desc), TCT_L1TCTE_V(&tct_desc)); ++ ++ if (TCT_L1TCTE_V(&tct_desc) == 0) { ++ event->type = EVT_BAD_TOKENID; ++ qemu_log("l2tcte is invalid\n"); ++ return -EINVAL; ++ } ++ ++ l2idx = tid & (TCT_L2_ENTRIES - 1); ++ tcte_addr = TCT_L2TCTE_PTR(&tct_desc) + l2idx * sizeof(*tcte); ++ qemu_log("l2idx: %d, tcte_addr: 0x%lx\n", l2idx, tcte_addr); ++ ret = ummu_get_tcte(ummu, tcte_addr, tcte, tid); ++ if (ret) { ++ event->type = EVT_TCT_FETCH; ++ qemu_log("failed to get tcte, ret = %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ummu_decode_tcte(UMMUState *ummu, UMMUTransCfg *cfg, ++ TCTE *tcte, UMMUEventInfo *event) ++{ ++ uint32_t tct_v = TCTE_TCT_V(tcte); ++ ++ if (tct_v == 0) { ++ qemu_log("fetched tcte invalid\n"); ++ event->type = EVT_BAD_TCT; ++ return -1; ++ } ++ ++ cfg->tct_ttba = TCTE_TTBA(tcte); ++ cfg->tct_sz = TCTE_SZ(tcte); ++ cfg->tct_tgs = tgs2granule(TCTE_TGS(tcte)); ++ qemu_log("tcte_tbba: 0x%lx, sz: %u, tgs: %u, tct_v: %u\n", ++ cfg->tct_ttba, cfg->tct_sz, cfg->tct_tgs, tct_v); ++ return 0; ++} ++ ++static int ummu_tect_parse_sparse_table(UMMUDevice *ummu_dev, UMMUTransCfg *cfg, ++ uint32_t dest_eid, UMMUEventInfo *event) ++{ ++ UMMUState *ummu = ummu_dev->ummu; ++ int ret; ++ TECTE tecte; ++ TCTE tcte; ++ uint32_t tecte_tag; ++ uint32_t tid = ub_dev_get_token_id(ummu_dev->udev); ++ ++ tecte_tag = ummu_get_tecte_tag_by_dest_eid(ummu, dest_eid); ++ if (tecte_tag == UINT32_MAX) { ++ qemu_log("failed to get tecte tag by dest_eid(%u).\n", dest_eid); ++ event->type = EVT_BAD_DSTEID; ++ goto failed; ++ } ++ ++ ret = ummu_find_tecte(ummu, tecte_tag, &tecte); ++ if (ret) { ++ event->type = EVT_TECT_FETCH; ++ qemu_log("failed to find tecte: %d\n", ret); ++ goto failed; ++ } ++ ++ ret = ummu_decode_tecte(ummu, cfg, &tecte, event); ++ if (ret) { ++ event->type = EVT_BAD_TECT; ++ qemu_log("failed to decode tecte.\n"); ++ goto failed; ++ } ++ ++ qemu_log("get udev(%s %s) tid(%u)\n", ++ ummu_dev->udev->name, ummu_dev->udev->qdev.id, tid); ++ ret = ummu_find_tcte(ummu, cfg, tid, &tcte, event); ++ if (ret) { ++ qemu_log("failed to find tecte.\n"); ++ goto failed; ++ } ++ ++ 
ret = ummu_decode_tcte(ummu, cfg, &tcte, event); ++ if (ret) { ++ qemu_log("failed to decode tecte.\n"); ++ goto failed; ++ } ++ cfg->tecte_tag = tecte_tag; ++ cfg->tid = tid; ++ ++ return 0; ++ ++failed: ++ event->tid = tid; ++ event->tecte_tag = tecte_tag; ++ return -EINVAL; ++} ++ ++static int ummu_decode_config(UMMUDevice *ummu_dev, UMMUTransCfg *cfg, UMMUEventInfo *event) ++{ ++ uint32_t dest_eid = ub_dev_get_ueid(ummu_dev->udev); ++ ++ qemu_log("ummu decode config dest_eid is %u.\n", dest_eid); ++ if (ummu_tect_mode_sparse_table(ummu_dev->ummu)) { ++ return ummu_tect_parse_sparse_table(ummu_dev, cfg, dest_eid, event); ++ } ++ ++ event->type = EVT_TECT_FETCH; ++ event->tecte_tag = ummu_get_tecte_tag_by_dest_eid(ummu_dev->ummu, dest_eid); ++ ++ qemu_log("current not support process linear table.\n"); ++ return -1; ++} ++ ++static UMMUTransCfg *ummu_get_config(UMMUDevice *ummu_dev, UMMUEventInfo *event) ++{ ++ UMMUState *ummu = ummu_dev->ummu; ++ UMMUTransCfg *cfg = NULL; ++ ++ cfg = g_hash_table_lookup(ummu->configs, ummu_dev); ++ if (cfg) { ++ return cfg; ++ } ++ ++ /* cfg will be freed when removed from hash table */ ++ cfg = g_new0(UMMUTransCfg, 1); ++ if (!ummu_decode_config(ummu_dev, cfg, event)) { ++ g_hash_table_insert(ummu->configs, ummu_dev, cfg); ++ } else { ++ g_free(cfg); ++ cfg = NULL; ++ } ++ ++ return cfg; ++} ++ ++static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte) ++{ ++ int ret; ++ dma_addr_t addr = baseaddr + index * sizeof(*pte); ++ ++ ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED); ++ if (ret) { ++ qemu_log("failed to get dma data for addr 0x%lx\n", addr); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void ummu_ptw_64_s1(UMMUTransCfg *cfg, dma_addr_t iova, IOMMUTLBEntry *entry, UMMUPTWEventInfo *ptw_info) ++{ ++ dma_addr_t baseaddr, indexmask; ++ uint32_t granule_sz, stride, level, inputsize; ++ ++ granule_sz = cfg->tct_tgs; ++ stride = VMSA_STRIDE(granule_sz); ++ if (granule_sz == 0 || stride == 0) { ++ qemu_log("ummu ptw 64 s1 failed: granule_sz = %u, stride = %u\n", granule_sz, stride); ++ goto error; ++ } ++ inputsize = 64 - cfg->tct_sz; ++ level = 4 - (inputsize - 4) / stride; ++ indexmask = VMSA_IDXMSK(inputsize, stride, level); ++ baseaddr = extract64(cfg->tct_ttba, 0, 48); ++ baseaddr &= ~indexmask; ++ ++ qemu_log("stride: %u, inputsize: %u, level: %u, baseaddr: 0x%lx\n", ++ stride, inputsize, level, baseaddr); ++ while (level < VMSA_LEVELS) { ++ uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); ++ uint64_t mask = subpage_size - 1; ++ uint64_t pte, gpa; ++ uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz); ++ ++ if (get_pte(baseaddr, offset, &pte)) { ++ goto error; ++ } ++ ++ if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { ++ qemu_log("invalid or reserved pte.\n"); ++ break; ++ } ++ ++ if (is_table_pte(pte, level)) { ++ baseaddr = get_table_pte_address(pte, granule_sz); ++ level++; ++ continue; ++ } else if (is_page_pte(pte, level)) { ++ gpa = get_page_pte_address(pte, granule_sz); ++ } else { ++ uint64_t block_size; ++ gpa = get_block_pte_address(pte, level, granule_sz, &block_size); ++ } ++ ++ entry->translated_addr = gpa; ++ entry->iova = iova & ~mask; ++ entry->addr_mask = mask; ++ ++ return; ++ } ++ ++error: ++ ptw_info->type = UMMU_PTW_ERR_TRANSLATION; ++ return; ++} ++ ++static void ummu_ptw(UMMUTransCfg *cfg, dma_addr_t iova, IOMMUTLBEntry *entry, UMMUPTWEventInfo *ptw_info) ++{ ++ ummu_ptw_64_s1(cfg, iova, entry, ptw_info); ++} ++ ++static 
MemTxResult eventq_write(UMMUEventQueue *q, UMMUEvent *evt_in) ++{ ++ dma_addr_t base_addr, addr; ++ MemTxResult ret; ++ UMMUEvent evt = *evt_in; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(evt.word); i++) { ++ cpu_to_le32s(&evt.word[i]); ++ } ++ ++ base_addr = EVENT_QUE_BASE_ADDR(&q->queue); ++ addr = base_addr + EVENT_QUE_WR_IDX(&q->queue) * q->queue.entry_size; ++ ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(UMMUEvent), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ return ret; ++ } ++ ++ ummu_eventq_prod_incr(q); ++ qemu_log("eventq: addr(0x%lx), prod(%u), cons(%u)\n", addr, ++ EVENT_QUE_WR_IDX(&q->queue), EVENT_QUE_RD_IDX(&q->queue)); ++ return MEMTX_OK; ++} ++ ++static MemTxResult ummu_write_eventq(UMMUState *u, UMMUEvent *evt) ++{ ++ UMMUEventQueue *queue = &u->eventq; ++ MemTxResult r; ++ ++ if (!ummu_eventq_enabled(u)) { ++ return MEMTX_ERROR; ++ } ++ ++ if (ummu_eventq_full(queue)) { ++ qemu_log("ummu eventq full, eventq write failed.\n"); ++ return MEMTX_ERROR; ++ } ++ ++ r = eventq_write(queue, evt); ++ if (r != MEMTX_OK) { ++ return r; ++ } ++ ++ if (!ummu_eventq_empty(queue)) { ++ ummu_glb_usi_notify(u, UMMU_USI_VECTOR_EVETQ); ++ } ++ ++ return MEMTX_OK; ++} ++ ++static void ummu_record_event(UMMUState *u, UMMUEventInfo *info) ++{ ++ UMMUEvent evt = {}; ++ MemTxResult r; ++ ++ if (!ummu_eventq_enabled(u)) { ++ qemu_log("ummu eventq disabled.\n"); ++ return; ++ } ++ ++ /* need set more EVT info for different event later */ ++ EVT_SET_TYPE(&evt, info->type); ++ EVT_SET_TECTE_TAG(&evt, info->tecte_tag); ++ EVT_SET_TID(&evt, info->tid); ++ ++ qemu_log("report event %s: tecte_tag %u tid %u\n", ++ ummu_event_type_strings[info->type], info->tecte_tag, info->tid); ++ ++ r = ummu_write_eventq(u, &evt); ++ if (r != MEMTX_OK) { ++ qemu_log("ummu failed to write eventq.\n"); ++ /* trigger glb err irq later */ ++ } ++} ++ ++static IOMMUTLBEntry ummu_translate(IOMMUMemoryRegion *mr, hwaddr addr, ++ IOMMUAccessFlags flag, int iommu_idx) ++{ ++ UMMUDevice *ummu_dev = container_of(mr, UMMUDevice, iommu); ++ UMMUTransCfg *cfg = NULL; ++ IOMMUTLBEntry entry = { ++ .target_as = &address_space_memory, ++ .iova = addr, ++ .translated_addr = addr, ++ .addr_mask = ~(hwaddr)0, ++ .perm = IOMMU_RW, ++ }; ++ UMMUEventInfo event = { ++ .type = EVT_NONE ++ }; ++ UMMUPTWEventInfo ptw_info = { ++ .type = UMMU_PTW_ERR_NONE ++ }; ++ ++ cfg = ummu_get_config(ummu_dev, &event); ++ if (!cfg) { ++ qemu_log("failed to get ummu config.\n"); ++ goto epilogue; ++ } ++ ++ /* need support cache TLB entry later */ ++ ummu_ptw(cfg, addr, &entry, &ptw_info); ++ if (ptw_info.type == UMMU_PTW_ERR_NONE) { ++ goto epilogue; ++ } ++ ++ event.tecte_tag = cfg->tecte_tag; ++ event.tid = cfg->tid; ++ switch (ptw_info.type) ++ { ++ case UMMU_PTW_ERR_TRANSLATION: ++ event.type = EVT_A_TRANSLATION; ++ break; ++ case UMMU_PTW_ERR_PERMISSION: ++ event.type = EVT_A_PERMISSION; ++ break; ++ default: ++ break; ++ } ++ ++epilogue: ++ qemu_log("ummu_translate: addr(0x%lx), translated_addr(0x%lx)\n", addr, entry.translated_addr); ++ ++ if (event.type != EVT_NONE) { ++ ummu_record_event(ummu_dev->ummu, &event); ++ } ++ ++ return entry; ++} ++ ++static int ummu_notify_flag_changed(IOMMUMemoryRegion *iommu, ++ IOMMUNotifierFlag old, ++ IOMMUNotifierFlag new, ++ Error **errp) ++{ ++ qemu_log("ummu_notify_flag_changed\n"); ++ return 0; ++} ++ + void ummu_dev_uninstall_nested_tecte(UMMUDevice *ummu_dev) + { + HostIOMMUDeviceIOMMUFD *idev = ummu_dev->idev; +@@ -1841,9 +2331,24 @@ free: + return -EINVAL; + } + ++static 
void ummu_iommu_memory_region_class_init(ObjectClass *klass, void *data) ++{ ++ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); ++ ++ imrc->translate = ummu_translate; ++ imrc->notify_flag_changed = ummu_notify_flag_changed; ++} ++ ++static const TypeInfo ummu_iommu_memory_region_info = { ++ .parent = TYPE_IOMMU_MEMORY_REGION, ++ .name = TYPE_UMMU_IOMMU_MEMORY_REGION, ++ .class_init = ummu_iommu_memory_region_class_init, ++}; ++ + static void ummu_base_register_types(void) + { + type_register_static(&ummu_base_info); ++ type_register_static(&ummu_iommu_memory_region_info); + } + + type_init(ummu_base_register_types) +diff --git a/hw/ub/ub_usi.c b/hw/ub/ub_usi.c +new file mode 100644 +index 0000000000..8250d853eb +--- /dev/null ++++ b/hw/ub/ub_usi.c +@@ -0,0 +1,38 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, see . ++ */ ++#include "qemu/osdep.h" ++#include "hw/qdev-core.h" ++#include "hw/ub/ub_usi.h" ++#include "hw/ub/ub_config.h" ++#include "qemu/log.h" ++#include "exec/address-spaces.h" ++ ++void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev) ++{ ++ MemTxAttrs attrs = {}; ++ attrs.requester_id = interrupt_id; ++ if (udev) { ++ AddressSpace *as = ub_device_iommu_address_space(udev); ++ address_space_stl_le(as, msg->address, msg->data, ++ attrs, NULL); ++ } else { ++ address_space_stl_le(&address_space_memory, msg->address, msg->data, ++ attrs, NULL); ++ } ++ qemu_log("usi notify success: interrupt_id %u eventid %u gicv3_its 0x%lx\n", ++ interrupt_id, msg->data, msg->address); ++} +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index a5101ed374..ca2a54d845 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -256,4 +256,6 @@ AddressSpace *ub_device_iommu_address_space(UBDevice *dev); + UBDevice *ub_find_device_by_id(const char *id); + uint32_t ub_interrupt_id(UBDevice *udev); + void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque); ++uint32_t ub_dev_get_token_id(UBDevice *udev); ++uint32_t ub_dev_get_ueid(UBDevice *udev); + #endif +diff --git a/include/hw/ub/ub_usi.h b/include/hw/ub/ub_usi.h +index a9df04e686..96332e5850 100644 +--- a/include/hw/ub/ub_usi.h ++++ b/include/hw/ub/ub_usi.h +@@ -24,4 +24,6 @@ struct USIMessage { + uint32_t data; + }; + ++void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev); ++ + #endif +-- +2.33.0 + diff --git a/ub-switch-name-fers-to-ers.patch b/ub-switch-name-fers-to-ers.patch new file mode 100644 index 0000000000000000000000000000000000000000..fef38a11a9fbbe4192158e5b9d36d2a73da6b5b3 --- /dev/null +++ b/ub-switch-name-fers-to-ers.patch @@ -0,0 +1,28 @@ +From 0e950101666bbf63517c8626517d4fdecf85ada0 Mon Sep 17 00:00:00 2001 +From: xiangzixuan +Date: Mon, 10 Nov 2025 21:04:09 +0800 +Subject: [PATCH 3/4] ub: switch name fers to ers + +switch name fers to ers + +Signed-off-by: xiangzixuan +--- + 
hw/arm/virt.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index e2e603a4a0..e29c0d47d6 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -1792,7 +1792,7 @@ static void create_ub(VirtMachineState *vms) + mmio_alias); + + mmio_alias = g_new0(MemoryRegion, 1); +- memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-idev-fers-as", ++ memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-idev-ers-as", + mmio_reg, vms->memmap[VIRT_UB_IDEV_ERS].base, + vms->memmap[VIRT_UB_IDEV_ERS].size); + memory_region_add_subregion(get_system_memory(), +-- +2.33.0 + diff --git a/ub-ummu-add-ummu_ops-for-vfio-ub-preprare.patch b/ub-ummu-add-ummu_ops-for-vfio-ub-preprare.patch new file mode 100644 index 0000000000000000000000000000000000000000..1d1cfd890b695535e92e75145aa90b88b431fb38 --- /dev/null +++ b/ub-ummu-add-ummu_ops-for-vfio-ub-preprare.patch @@ -0,0 +1,347 @@ +From 2995d6011212c845ef24a9a112b7eaaf4388947b Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 19:09:23 +0800 +Subject: [PATCH 2/6] ub: ummu add ummu_ops for vfio-ub preprare + +this prepare for vfio-ub nested mode running + +Signed-off-by: caojinhuahw +--- + hw/ub/ub.c | 12 +- + hw/ub/ub_ummu.c | 223 ++++++++++++++++++++++++++++++++++ + include/hw/ub/ub.h | 1 + + linux-headers/linux/iommufd.h | 9 ++ + 4 files changed, 244 insertions(+), 1 deletion(-) + +diff --git a/hw/ub/ub.c b/hw/ub/ub.c +index 6a2c3cc493..7a171c787b 100644 +--- a/hw/ub/ub.c ++++ b/hw/ub/ub.c +@@ -934,4 +934,14 @@ int ub_dev_finally_setup(VirtMachineState *vms, Error **errp) + ub_set_ubinfo_in_ubc_table(vms); + + return 0; +-} +\ No newline at end of file ++} ++ ++void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque) ++{ ++ /* ++ * If called, ub_setup_iommu() should provide a minimum set of ++ * useful callbacks for the bus. 
++ */ ++ bus->iommu_ops = ops; ++ bus->iommu_opaque = opaque; ++} +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index ed92e97d06..228fa14b87 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -1175,6 +1175,206 @@ int ummu_associating_with_ubc(BusControllerState *ubc) + return 0; + } + ++static UMMUDevice *ummu_get_udev(UBBus *bus, UMMUState *u, uint32_t eid) ++{ ++ UMMUDevice *ummu_dev = NULL; ++ UBDevice *udev = NULL; ++ char *name = NULL; ++ ++ udev = ub_find_device_by_eid(bus, eid); ++ ummu_dev = g_hash_table_lookup(u->ummu_devs, udev); ++ if (ummu_dev) { ++ return ummu_dev; ++ } ++ ++ /* will be freed when remove from hash table */ ++ ummu_dev = g_new0(UMMUDevice, 1); ++ ummu_dev->ummu = u; ++ ummu_dev->udev = udev; ++ ++ name = g_strdup_printf("%s-0x%x", u->mrtypename, eid); ++ memory_region_init_iommu(&ummu_dev->iommu, sizeof(ummu_dev->iommu), u->mrtypename, ++ OBJECT(u), name, UINT64_MAX); ++ address_space_init(&ummu_dev->as_sysmem, &u->root, name); ++ address_space_init(&ummu_dev->as, MEMORY_REGION(&ummu_dev->iommu), name); ++ g_free(name); ++ g_hash_table_insert(u->ummu_devs, udev, ummu_dev); ++ ++ return ummu_dev; ++} ++ ++static AddressSpace *ummu_find_add_as(UBBus *bus, void *opaque, uint32_t eid) ++{ ++ UMMUState *u = opaque; ++ UMMUDevice *ummu_dev = ummu_get_udev(bus, u, eid); ++ ++ if (u->nested && !ummu_dev->s1_hwpt) { ++ return &ummu_dev->as_sysmem; ++ } ++ ++ return &ummu_dev->as; ++} ++ ++static bool ummu_is_nested(void *opaque) ++{ ++ UMMUState *u = opaque; ++ ++ return u->nested; ++} ++ ++static bool ummu_dev_attach_viommu(UMMUDevice *udev, ++ HostIOMMUDeviceIOMMUFD *idev, Error **errp) ++{ ++ UMMUState *u = udev->ummu; ++ UMMUS2Hwpt *s2_hwpt = NULL; ++ UMMUViommu *viommu = NULL; ++ uint32_t s2_hwpt_id; ++ ++ if (u->viommu) { ++ return host_iommu_device_iommufd_attach_hwpt( ++ idev, u->viommu->s2_hwpt->hwpt_id, errp); ++ } ++ ++ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, idev->ioas_id, ++ IOMMU_HWPT_ALLOC_NEST_PARENT, ++ IOMMU_HWPT_DATA_NONE, 0, NULL, ++ &s2_hwpt_id, NULL, errp)) { ++ error_setg(errp, "failed to allocate an S2 hwpt"); ++ return false; ++ } ++ ++ if (!host_iommu_device_iommufd_attach_hwpt(idev, s2_hwpt_id, errp)) { ++ error_setg(errp, "failed to attach stage-2 HW pagetable"); ++ goto free_s2_hwpt; ++ } ++ ++ viommu = g_new0(UMMUViommu, 1); ++ viommu->core = iommufd_backend_alloc_viommu(idev->iommufd, idev->devid, ++ IOMMU_VIOMMU_TYPE_UMMU, ++ s2_hwpt_id); ++ if (!viommu->core) { ++ error_setg(errp, "failed to allocate a viommu"); ++ goto free_viommu; ++ } ++ ++ s2_hwpt = g_new0(UMMUS2Hwpt, 1); ++ s2_hwpt->iommufd = idev->iommufd; ++ s2_hwpt->hwpt_id = s2_hwpt_id; ++ s2_hwpt->ioas_id = idev->ioas_id; ++ qemu_log("alloc hwpt for s2 success, hwpt id is %u\n", s2_hwpt_id); ++ ++ viommu->iommufd = idev->iommufd; ++ viommu->s2_hwpt = s2_hwpt; ++ ++ u->viommu = viommu; ++ return true; ++ ++free_viommu: ++ g_free(viommu); ++ host_iommu_device_iommufd_attach_hwpt(idev, udev->idev->ioas_id, errp); ++free_s2_hwpt: ++ iommufd_backend_free_id(idev->iommufd, s2_hwpt_id); ++ ++ return false; ++} ++ ++static bool ummu_dev_set_iommu_dev(UBBus *bus, void *opaque, uint32_t eid, ++ HostIOMMUDevice *hiod, Error **errp) ++{ ++ HostIOMMUDeviceIOMMUFD *idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod); ++ UMMUState *u = opaque; ++ UMMUDevice *ummu_dev = NULL; ++ ++ if (!u->nested) { ++ error_setg(errp, "set iommu dev expcet ummu is nested mode\n"); ++ return false; ++ } ++ ++ if (!idev) { ++ error_setg(errp, "unexpect idev is NULL\n"); ++ return false; ++ } ++ 
++ ummu_dev = ummu_get_udev(bus, u, eid); ++ if (!ummu_dev) { ++ error_setg(errp, "failed to get ummu dev by eid 0x%x\n", eid); ++ return false; ++ } ++ ++ if (ummu_dev->idev) { ++ if (ummu_dev->idev != idev) { ++ error_setg(errp, "udev(%s) exist idev conflict new config idev\n", ummu_dev->udev->name); ++ return false; ++ } else { ++ return true; ++ } ++ } ++ ++ if (!ummu_dev_attach_viommu(ummu_dev, idev, errp)) { ++ error_report("Unable to attach viommu"); ++ return false; ++ } ++ ++ ummu_dev->idev = idev; ++ ummu_dev->viommu = u->viommu; ++ QLIST_INSERT_HEAD(&u->viommu->device_list, ummu_dev, next); ++ ++ return 0; ++} ++ ++static void ummu_dev_unset_iommu_dev(UBBus *bus, void *opaque, uint32_t eid) ++{ ++ UMMUDevice *ummu_dev; ++ UMMUViommu *viommu = NULL; ++ UMMUVdev *vdev = NULL; ++ UMMUState *u = opaque; ++ UBDevice *udev = NULL; ++ ++ if (!u->nested) { ++ return; ++ } ++ ++ udev = ub_find_device_by_eid(bus, eid); ++ ummu_dev = g_hash_table_lookup(u->ummu_devs, udev); ++ if (!ummu_dev) { ++ return; ++ } ++ ++ if (!host_iommu_device_iommufd_attach_hwpt(ummu_dev->idev, ++ ummu_dev->idev->ioas_id, NULL)) { ++ error_report("Unable to attach dev to the default HW pagetable"); ++ } ++ ++ vdev = ummu_dev->vdev; ++ viommu = ummu_dev->viommu; ++ ++ ummu_dev->idev = NULL; ++ ummu_dev->viommu = NULL; ++ QLIST_REMOVE(ummu_dev, next); ++ ++ if (vdev) { ++ iommufd_backend_free_id(viommu->iommufd, vdev->core->vdev_id); ++ g_free(vdev->core); ++ g_free(vdev); ++ } ++ ++ if (QLIST_EMPTY(&viommu->device_list)) { ++ iommufd_backend_free_id(viommu->iommufd, viommu->core->viommu_id); ++ g_free(viommu->core); ++ iommufd_backend_free_id(viommu->iommufd, viommu->s2_hwpt->hwpt_id); ++ g_free(viommu->s2_hwpt); ++ g_free(viommu); ++ u->viommu = NULL; ++ } ++} ++ ++static const UBIOMMUOps ummu_ops = { ++ .get_address_space = ummu_find_add_as, ++ .ummu_is_nested = ummu_is_nested, ++ .set_iommu_device = ummu_dev_set_iommu_dev, ++ .unset_iommu_device = ummu_dev_unset_iommu_dev, ++}; ++ + static void ub_save_ummu_list(UMMUState *u) + { + QLIST_INSERT_HEAD(&ub_umms, u, node); +@@ -1202,7 +1402,24 @@ static void ummu_base_realize(DeviceState *dev, Error **errp) + ummu_registers_init(u); + ub_save_ummu_list(u); + ++ u->ummu_devs = g_hash_table_new_full(NULL, NULL, NULL, g_free); + QLIST_INIT(&u->kvtbl); ++ if (u->primary_bus) { ++ ub_setup_iommu(u->primary_bus, &ummu_ops, u); ++ } else { ++ error_setg(errp, "UMMU is not attached to any UB bus!"); ++ } ++ ++ if (u->nested) { ++ memory_region_init(&u->stage2, OBJECT(u), "stage2", UINT64_MAX); ++ memory_region_init_alias(&u->sysmem, OBJECT(u), ++ "ummu-sysmem", get_system_memory(), 0, ++ memory_region_size(get_system_memory())); ++ memory_region_add_subregion(&u->stage2, 0, &u->sysmem); ++ ++ memory_region_init(&u->root, OBJECT(u), "ummu-root", UINT64_MAX); ++ memory_region_add_subregion(&u->root, 0, &u->stage2); ++ } + } + + static void ummu_base_unrealize(DeviceState *dev) +@@ -1217,6 +1434,12 @@ static void ummu_base_unrealize(DeviceState *dev) + g_free(sysdev->parent_obj.id); + } + ++ if (u->ummu_devs) { ++ g_hash_table_remove_all(u->ummu_devs); ++ g_hash_table_destroy(u->ummu_devs); ++ u->ummu_devs = NULL; ++ } ++ + QLIST_FOREACH_SAFE(entry, &u->kvtbl, list, next_entry) { + QLIST_REMOVE(entry, list); + g_free(entry); +diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h +index db0a19a8bb..a5101ed374 100644 +--- a/include/hw/ub/ub.h ++++ b/include/hw/ub/ub.h +@@ -255,4 +255,5 @@ static inline uint64_t ub_config_size(void) + AddressSpace 
*ub_device_iommu_address_space(UBDevice *dev); + UBDevice *ub_find_device_by_id(const char *id); + uint32_t ub_interrupt_id(UBDevice *udev); ++void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque); + #endif +diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h +index 3e57fee01c..79bc50379b 100644 +--- a/linux-headers/linux/iommufd.h ++++ b/linux-headers/linux/iommufd.h +@@ -416,6 +416,10 @@ struct iommu_hwpt_arm_smmuv3 { + __aligned_le64 ste[2]; + }; + ++struct iommu_hwpt_ummu { ++ __aligned_le64 tecte[2]; ++}; ++ + /** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data +@@ -426,6 +430,7 @@ enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE = 0, + IOMMU_HWPT_DATA_VTD_S1 = 1, + IOMMU_HWPT_DATA_ARM_SMMUV3 = 2, ++ IOMMU_HWPT_DATA_UMMU = 3, + }; + + /** +@@ -701,10 +706,12 @@ struct iommu_hwpt_get_dirty_bitmap { + * Data Type + * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1 + * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3 ++ * @IOMMU_VIOMMU_INVALIDATE_DATA_UMMU: Invalidation data for UMMU + */ + enum iommu_hwpt_invalidate_data_type { + IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0, + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1, ++ IOMMU_VIOMMU_INVALIDATE_DATA_UMMU = 2, + }; + + /** +@@ -902,10 +909,12 @@ struct iommu_fault_alloc { + * enum iommu_viommu_type - Virtual IOMMU Type + * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use + * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type ++ * @IOMMU_VIOMMU_TYPE_UMMU: HISI UMMU driver specific type + */ + enum iommu_viommu_type { + IOMMU_VIOMMU_TYPE_DEFAULT = 0, + IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1, ++ IOMMU_VIOMMU_TYPE_UMMU = 2, + }; + + /** +-- +2.33.0 + diff --git a/ub-ummu-glb-int-enable.patch b/ub-ummu-glb-int-enable.patch new file mode 100644 index 0000000000000000000000000000000000000000..91ddace30482c8e994c352d4fcf03e654ea5ddc2 --- /dev/null +++ b/ub-ummu-glb-int-enable.patch @@ -0,0 +1,110 @@ +From 8a0f51ca6c9f05be42ab43b7ea744f1d5b2dfb5b Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 16:55:14 +0800 +Subject: [PATCH 1/6] ub: ummu glb int enable + +init ummu glb interrupt + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 1 + + hw/ub/ub_ummu.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 63 insertions(+) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index e53af1bd75..c6dbef23a8 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -14,6 +14,7 @@ ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0 + ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx" + ummu_glberr_usi_reg_writell(uint64_t addr) "set glb err usi addr 0x%lx" + ummu_mapt_ctx_base_reg_writell(uint64_t addr) "config mapt ctx base 0x%lx" ++ummu_glb_int_enable(int type, int virq) "int type(%d) virq(%d)" + + # ub.c + ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index 033fcb9a34..ed92e97d06 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -646,8 +646,68 @@ static void ummu_mcmdq_reg_writel(UMMUState *u, hwaddr offset, uint64_t data) + trace_ummu_mcmdq_reg_writel(mcmdq_idx, MCMD_QUE_WD_IDX(&q->queue), MCMD_QUE_RD_IDX(&q->queue)); + } + ++static void ummu_glb_int_disable(UMMUState *u, UMMUUSIVectorType type) ++{ ++ qemu_log("start disable glb int\n"); ++ ++ if (u->usi_virq[type] < 0) 
{ ++ return; ++ } ++ ++ kvm_irqchip_release_virq(kvm_state, u->usi_virq[type]); ++ u->usi_virq[type] = -1; ++} ++ ++static void ummu_glb_int_enable(UMMUState *u, UMMUUSIVectorType type) ++{ ++ KVMRouteChange route_change; ++ USIMessage msg; ++ uint32_t interrupt_id = UMMU_INTERRUPT_ID; ++ ++ if (type == UMMU_USI_VECTOR_EVETQ) { ++ msg = ummu_get_eventq_usi_message(u); ++ } else { ++ msg = ummu_get_gerror_usi_message(u); ++ } ++ ++ route_change = kvm_irqchip_begin_route_changes(kvm_state); ++ u->usi_virq[type] = kvm_irqchip_add_usi_route(&route_change, msg, interrupt_id, NULL); ++ trace_ummu_glb_int_enable(type, u->usi_virq[type]); ++ if (u->usi_virq[type] < 0) { ++ qemu_log("kvm irqchip failed to add usi route.\n"); ++ return; ++ } ++ kvm_irqchip_commit_route_changes(&route_change); ++} ++ ++static void ummu_handle_glb_int_enable_update(UMMUState *u, UMMUUSIVectorType type, ++ bool was_enabled, bool is_enabled) ++{ ++ if (was_enabled && !is_enabled) { ++ ummu_glb_int_disable(u, type); ++ } else if (!was_enabled && is_enabled) { ++ ummu_glb_int_enable(u, type); ++ } ++} ++ + static void ummu_glb_int_en_process(UMMUState *u, uint64_t data) + { ++ bool gerror_was_enabled, eventq_was_enabled; ++ bool gerror_is_enabled, eventq_is_enabled; ++ ++ /* process eventq interrupt update */ ++ eventq_was_enabled = ummu_event_que_int_en(u); ++ ummu_set_event_que_int_en(u, data); ++ eventq_is_enabled = ummu_event_que_int_en(u); ++ ummu_handle_glb_int_enable_update(u, UMMU_USI_VECTOR_EVETQ, ++ eventq_was_enabled, eventq_is_enabled); ++ ++ /* process glb_err interrupt update */ ++ gerror_was_enabled = ummu_glb_err_int_en(u); ++ ummu_set_glb_err_int_en(u, data); ++ gerror_is_enabled = ummu_glb_err_int_en(u); ++ ummu_handle_glb_int_enable_update(u, UMMU_USI_VECTOR_GERROR, ++ gerror_was_enabled, gerror_is_enabled); + } + + static MemTxResult ummu_mapt_cmdq_fetch_cmd(MAPTCmdqBase *base, MAPTCmd *cmd) +@@ -1137,6 +1197,8 @@ static void ummu_base_realize(DeviceState *dev, Error **errp) + memory_region_init_io(&u->ummu_reg_mem, OBJECT(u), &ummu_reg_ops, + u, TYPE_UB_UMMU, u->ummu_reg_size); + sysbus_init_mmio(sysdev, &u->ummu_reg_mem); ++ ++ memset(u->usi_virq, -1, sizeof(u->usi_virq)); + ummu_registers_init(u); + ub_save_ummu_list(u); + +-- +2.33.0 + diff --git a/ub-ummu-realize-config-tecte-mcmdq-process.patch b/ub-ummu-realize-config-tecte-mcmdq-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..1915fefca79f27a001fb8031b4a31c88f96d0213 --- /dev/null +++ b/ub-ummu-realize-config-tecte-mcmdq-process.patch @@ -0,0 +1,389 @@ +From e3d3555eced677198591191d201eed367389fc7a Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 19:39:54 +0800 +Subject: [PATCH 3/6] ub: ummu realize config tecte mcmdq process + +support mcmdq process, install guest tecte to host physical ummu + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 6 + + hw/ub/ub_ummu.c | 302 ++++++++++++++++++++++++++++++++++++++- + hw/ub/ub_ummu_internal.h | 3 + + 3 files changed, 310 insertions(+), 1 deletion(-) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index c6dbef23a8..78182e2896 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -4,6 +4,8 @@ + ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)" + mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s" + mcmdq_cmd_sync_handler(uint32_t mcmdq_idx, uint64_t usi_addr, uint32_t usi_data) "CMD_SYNC: mcmdq_idx(%u) usi_addr(0x%lx) usi_data(0x%x)" 
++mcmdq_cmd_cfgi_tect_handler(uint32_t mcmdq_idx, uint32_t tecte_tag) "CMD_CFGI_TECT: mcmdq_idx(%u) tecte_tag(%u)" ++mcmdq_cmd_cfgi_tect_range_handler(uint32_t mcmdq_idx, uint32_t tecte_tag, uint32_t range) "CMD_CFGI_TECT_RANGE: mcmdq_idx(%u) tecte_tag(%u) range(%u)" + mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)" +@@ -15,6 +17,10 @@ ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx" + ummu_glberr_usi_reg_writell(uint64_t addr) "set glb err usi addr 0x%lx" + ummu_mapt_ctx_base_reg_writell(uint64_t addr) "config mapt ctx base 0x%lx" + ummu_glb_int_enable(int type, int virq) "int type(%d) virq(%d)" ++ummu_config_tecte(int valid, int mode) "tecte: valid(%d), st_mode(0x%x)" ++ummu_invalid_single_tecte(uint32_t tecte_tag) "tecte_tag: %u" ++ummu_dev_install_nested_tecte(uint64_t tecte0, uint64_t tecte1) "installed tecte[0]: 0x%lx, tecte[1]: 0x%lx" ++ummu_install_nested_tecte(long long unsigned int tecte0, long long unsigned int tecte1) "installed tecte[0]: 0x%llx, tecte[1]: 0x%llx" + + # ub.c + ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index 228fa14b87..a55ab00e96 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -450,6 +450,164 @@ static void mcmdq_cmd_delete_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmd + } + } + ++static gboolean ummu_invalid_tecte(gpointer key, gpointer value, gpointer user_data) ++{ ++ UMMUDevice *ummu_dev = (UMMUDevice *)key; ++ UMMUTransCfg *cfg = (UMMUTransCfg *)value; ++ UMMUTecteRange *range = (UMMUTecteRange *)user_data; ++ ++ if (range->invalid_all || ++ (cfg->tecte_tag >= range->start && cfg->tecte_tag <= range->end)) { ++ qemu_log("ummu start invalidate udev(%s) cached config.\n", ummu_dev->udev->qdev.id); ++ return true; ++ } ++ ++ return false; ++} ++ ++static void ummu_invalid_single_tecte(UMMUState *u, uint32_t tecte_tag) ++{ ++ UMMUTecteRange tecte_range = { .invalid_all = false, }; ++ ++ trace_ummu_invalid_single_tecte(tecte_tag); ++ tecte_range.start = tecte_tag; ++ tecte_range.end = tecte_tag; ++ g_hash_table_foreach_remove(u->configs, ummu_invalid_tecte, &tecte_range); ++} ++ ++static void ummu_uninstall_nested_tecte(gpointer key, gpointer value, gpointer opaque) ++{ ++ UMMUDevice *ummu_dev = (UMMUDevice *)value; ++ ++ ummu_dev_uninstall_nested_tecte(ummu_dev); ++} ++ ++/* V | ST_MODE(.CONFIG) | TCRC_SEL(.STRW) */ ++#define INSTALL_TECTE0_WORD0_MASK (GENMASK(0, 0) | GENMASK(1, 3) | GENMASK(22, 21)) ++#define INSTALL_TECTE0_WORD1_MASK 0 ++/* TCT_MAXNUM(.S1CDMax) | TCT_PTR[31:6](.S1ContextPtr) */ ++#define INSTALL_TECTE1_WORD0_MASK (GENMASK(4, 0) | GENMASK(31, 6)) ++/* TCT_PTR[51:32](.S1ContextPtr) | TCT_FMT(.S1Fmt) | TCT_STALL_EN(.S1STALLD) | ++ * TCT_Ptr_MD0(.S1CIR) | TCT_Ptr_MD1(.S1COR) | TCT_Ptr_MSD(.S1CSH) */ ++#define INSTALL_TECTE1_WORD1_MASK (GENMASK(19, 0) | \ ++ GENMASK(21, 20) | \ ++ GENMASK(24, 24) | \ ++ GENMASK(27, 26) | \ ++ GENMASK(29, 28) | \ ++ GENMASK(31, 30)) ++ ++static void ummu_install_nested_tecte(gpointer key, gpointer value, gpointer opaque) ++{ ++ UMMUDevice *ummu_dev = (UMMUDevice *)value; ++ TECTE *tecte = (TECTE *)opaque; ++ struct iommu_hwpt_ummu 
iommu_config = {}; ++ int ret; ++ ++ if (ummu_dev->udev->dev_type != UB_TYPE_DEVICE && ++ ummu_dev->udev->dev_type != UB_TYPE_IDEVICE) { ++ return; ++ } ++ ++ if (!ummu_dev->vdev && ummu_dev->idev && ummu_dev->viommu) { ++ UMMUVdev *vdev = g_new0(UMMUVdev, 1); ++ /* default use eid as virt_id */ ++ vdev->core = iommufd_backend_alloc_vdev(ummu_dev->idev, ummu_dev->viommu->core, ummu_dev->udev->eid); ++ if (!vdev->core) { ++ error_report("failed to allocate a vDEVICE"); ++ g_free(vdev); ++ return; ++ } ++ ummu_dev->vdev = vdev; ++ } ++ ++ iommu_config.tecte[0] = (uint64_t)tecte->word[0] & INSTALL_TECTE0_WORD0_MASK; ++ iommu_config.tecte[0] |= ((uint64_t)tecte->word[1] & INSTALL_TECTE0_WORD1_MASK) << 32; ++ iommu_config.tecte[1] = (uint64_t)tecte->word[2] & INSTALL_TECTE1_WORD0_MASK; ++ iommu_config.tecte[1] |= ((uint64_t)tecte->word[3] & INSTALL_TECTE1_WORD1_MASK) << 32; ++ trace_ummu_install_nested_tecte(iommu_config.tecte[0], iommu_config.tecte[1]); ++ ret = ummu_dev_install_nested_tecte(ummu_dev, IOMMU_HWPT_DATA_UMMU, ++ sizeof(iommu_config), &iommu_config); ++ if (ret && ret != -ENOENT) { ++ error_report("Unable to alloc Stage-1 HW Page Table: %d", ret); ++ } else if (ret == 0) { ++ qemu_log("install nested tecte success.\n"); ++ } ++} ++ ++static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte); ++static void ummu_config_tecte(UMMUState *u, uint32_t tecte_tag) ++{ ++ TECTE tecte; ++ int ret; ++ ++ ret = ummu_find_tecte(u, tecte_tag, &tecte); ++ if (ret) { ++ qemu_log("failed to find tecte\n"); ++ return; ++ } ++ ++ trace_ummu_config_tecte(TECTE_VALID(&tecte), TECTE_ST_MODE(&tecte)); ++ if (!TECTE_VALID(&tecte) || TECTE_ST_MODE(&tecte) != TECTE_ST_MODE_S1) { ++ g_hash_table_foreach(u->ummu_devs, ummu_uninstall_nested_tecte, NULL); ++ return; ++ } ++ ++ g_hash_table_foreach(u->ummu_devs, ummu_install_nested_tecte, &tecte); ++ if (u->tecte_tag_num >= UMMU_TECTE_TAG_MAX_NUM) { ++ qemu_log("unexpect tecte tag num over %u\n", UMMU_TECTE_TAG_MAX_NUM); ++ return; ++ } else { ++ u->tecte_tag_cache[u->tecte_tag_num++] = tecte_tag; ++ } ++} ++ ++static void ummu_invalidate_cache(UMMUState *u, UMMUMcmdqCmd *cmd); ++static void mcmdq_cmd_cfgi_tect_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ uint32_t tecte_tag = CMD_TECTE_TAG(cmd); ++ ++ trace_mcmdq_cmd_cfgi_tect_handler(mcmdq_idx, tecte_tag); ++ ++ ummu_invalid_single_tecte(u, tecte_tag); ++ ummu_config_tecte(u, tecte_tag); ++ ummu_invalidate_cache(u, cmd); ++} ++ ++static void ummu_viommu_invalidate_cache(IOMMUFDViommu *viommu, uint32_t type, UMMUMcmdqCmd *cmd) ++{ ++ int ret; ++ uint32_t tecte_tag = CMD_TECTE_TAG(cmd); ++ uint32_t ncmds = 1; ++ ++ if (!viommu) { ++ return; ++ } ++ ++ ret = iommufd_viommu_invalidate_cache(viommu->iommufd, viommu->viommu_id, ++ type, sizeof(*cmd), &ncmds, cmd); ++ if (ret) { ++ qemu_log("failed to invalidate cache for ummu, tecte_tag = %u, ret = %d\n", tecte_tag, ret); ++ } ++} ++ ++static void ummu_invalidate_cache(UMMUState *u, UMMUMcmdqCmd *cmd) ++{ ++ IOMMUFDViommu *viommu = NULL; ++ UMMUDevice *ummu_dev = NULL; ++ ++ if (!u->viommu) { ++ return; ++ } ++ ++ ummu_dev = QLIST_FIRST(&u->viommu->device_list); ++ if (!ummu_dev || !ummu_dev->vdev) { ++ return; ++ } ++ ++ viommu = u->viommu->core; ++ ummu_viommu_invalidate_cache(viommu, IOMMU_VIOMMU_INVALIDATE_DATA_UMMU, cmd); ++} ++ + static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); +@@ -527,7 +685,7 @@ 
static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcm + [CMD_SYNC] = mcmdq_cmd_sync_handler, + [CMD_STALL_RESUME] = NULL, + [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg, +- [CMD_CFGI_TECT] = NULL, ++ [CMD_CFGI_TECT] = mcmdq_cmd_cfgi_tect_handler, + [CMD_CFGI_TECT_RANGE] = NULL, + [CMD_CFGI_TCT] = NULL, + [CMD_CFGI_TCT_ALL] = NULL, +@@ -1410,6 +1568,7 @@ static void ummu_base_realize(DeviceState *dev, Error **errp) + error_setg(errp, "UMMU is not attached to any UB bus!"); + } + ++ u->tecte_tag_num = 0; + if (u->nested) { + memory_region_init(&u->stage2, OBJECT(u), "stage2", UINT64_MAX); + memory_region_init_alias(&u->sysmem, OBJECT(u), +@@ -1479,8 +1638,149 @@ static const TypeInfo ummu_base_info = { + .class_init = ummu_base_class_init, + }; + ++static int ummu_get_tecte(UMMUState *ummu, dma_addr_t addr, TECTE *tecte) ++{ ++ int ret, i; ++ ++ ret = dma_memory_read(&address_space_memory, addr, tecte, sizeof(*tecte), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("Cannot fetch tecte at address=0x%lx\n", addr); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(tecte->word); i++) { ++ le32_to_cpus(&tecte->word[i]); ++ } ++ ++ return 0; ++} ++ ++static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte) ++{ ++ dma_addr_t tect_base_addr = TECT_BASE_ADDR(ummu->tect_base); ++ dma_addr_t tecte_addr; ++ int ret; ++ int i; ++ ++ if (ummu_tect_fmt_2level(ummu)) { ++ int l1_tecte_offset, l2_tecte_offset; ++ uint32_t split; ++ dma_addr_t l1ptr, l2ptr; ++ TECTEDesc l1_tecte_desc; ++ ++ split = ummu_tect_split(ummu); ++ l1_tecte_offset = tecte_tag >> split; ++ l2_tecte_offset = tecte_tag & ((1 << split) - 1); ++ l1ptr = (dma_addr_t)(tect_base_addr + l1_tecte_offset * sizeof(l1_tecte_desc)); ++ ++ ret = dma_memory_read(&address_space_memory, l1ptr, &l1_tecte_desc, ++ sizeof(l1_tecte_desc), MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("dma read failed for tecte level1 desc.\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(l1_tecte_desc.word); i++) { ++ le32_to_cpus(&l1_tecte_desc.word[i]); ++ } ++ ++ if (TECT_DESC_V(&l1_tecte_desc) == 0) { ++ qemu_log("tecte desc is invalid\n"); ++ return -EINVAL; ++ } ++ ++ l2ptr = TECT_L2TECTE_PTR(&l1_tecte_desc); ++ tecte_addr = l2ptr + l2_tecte_offset * sizeof(*tecte); ++ } else { ++ qemu_log("liner table process not support\n"); ++ return -EINVAL; ++ } ++ ++ if (ummu_get_tecte(ummu, tecte_addr, tecte)) { ++ qemu_log("failed to get tecte.\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++void ummu_dev_uninstall_nested_tecte(UMMUDevice *ummu_dev) ++{ ++ HostIOMMUDeviceIOMMUFD *idev = ummu_dev->idev; ++ UMMUS1Hwpt *s1_hwpt = ummu_dev->s1_hwpt; ++ uint32_t hwpt_id; ++ ++ if (!s1_hwpt || !ummu_dev->viommu) { ++ return; ++ } ++ ++ hwpt_id = ummu_dev->viommu->s2_hwpt->hwpt_id; ++ if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, NULL)) { ++ error_report("Unable to attach dev to stage-2 HW pagetable"); ++ return; ++ } ++ ++ qemu_log("uninstall s1 hwpt(%u) success\n", s1_hwpt->hwpt_id); ++ iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id); ++ ummu_dev->s1_hwpt = NULL; ++ g_free(s1_hwpt); ++} ++ ++int ummu_dev_install_nested_tecte(UMMUDevice *ummu_dev, uint32_t data_type, ++ uint32_t data_len, void *data) ++{ ++ UMMUViommu *viommu = ummu_dev->viommu; ++ UMMUS1Hwpt *s1_hwpt = ummu_dev->s1_hwpt; ++ HostIOMMUDeviceIOMMUFD *idev = ummu_dev->idev; ++ uint64_t *tecte = (uint64_t *)data; ++ ++ if (!idev || !viommu) { ++ return -ENOENT; ++ } ++ ++ if (s1_hwpt) { ++ 
return 0; ++ } ++ ++ s1_hwpt = g_new0(UMMUS1Hwpt, 1); ++ if (!s1_hwpt) { ++ return -ENOMEM; ++ } ++ ++ s1_hwpt->ummu = ummu_dev->ummu; ++ s1_hwpt->viommu = viommu; ++ s1_hwpt->iommufd = idev->iommufd; ++ ++ if (tecte) { ++ trace_ummu_dev_install_nested_tecte(tecte[0], tecte[1]); ++ } ++ ++ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, ++ viommu->core->viommu_id, 0, data_type, ++ data_len, data, &s1_hwpt->hwpt_id, NULL, NULL)) { ++ goto free; ++ } ++ ++ if (!host_iommu_device_iommufd_attach_hwpt(idev, s1_hwpt->hwpt_id, NULL)) { ++ goto free_hwpt; ++ } ++ ++ ummu_dev->s1_hwpt = s1_hwpt; ++ ++ return 0; ++free_hwpt: ++ iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id); ++free: ++ ummu_dev->s1_hwpt = NULL; ++ g_free(s1_hwpt); ++ ++ return -EINVAL; ++} ++ + static void ummu_base_register_types(void) + { + type_register_static(&ummu_base_info); + } ++ + type_init(ummu_base_register_types) +diff --git a/hw/ub/ub_ummu_internal.h b/hw/ub/ub_ummu_internal.h +index 68724e5ce1..5b90ea037e 100644 +--- a/hw/ub/ub_ummu_internal.h ++++ b/hw/ub/ub_ummu_internal.h +@@ -955,4 +955,7 @@ static inline void ummu_plib_usr_va_to_pibi_os_va(MAPTCmd *mapt_cmd, UMMUMcmdqCm + mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag); + } + ++void ummu_dev_uninstall_nested_tecte(UMMUDevice *ummu_dev); ++int ummu_dev_install_nested_tecte(UMMUDevice *sdev, uint32_t data_type, ++ uint32_t data_len, void *data); + #endif +-- +2.33.0 + diff --git a/ub-ummu-realize-some-ummu-read-write-process.patch b/ub-ummu-realize-some-ummu-read-write-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..de6116caef8ee4ef6d932ed96b623eada4d77950 --- /dev/null +++ b/ub-ummu-realize-some-ummu-read-write-process.patch @@ -0,0 +1,703 @@ +From 0a275e92a75172c0e3afac67552d488c7c669b74 Mon Sep 17 00:00:00 2001 +From: caojinhuahw +Date: Thu, 13 Nov 2025 11:35:25 +0800 +Subject: [PATCH 3/6] ub ummu: realize some ummu read/write process + +1. realize ummu read/write ops +2. realize mapt cmd process + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 10 + + hw/ub/ub_ummu.c | 650 +++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 660 insertions(+) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index d24c754de1..acc25a7052 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -1,3 +1,13 @@ ++# See docs/devel/tracing.rst for syntax documentation. 
++ ++# ub_ummu.c ++ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)" ++ummu_mcmdq_base_reg_writell(uint8_t idx, uint64_t base, uint8_t log2size) "idx(%u) base(0x%lx) log2size(0x%x)" ++ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0x%x)" ++ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx" ++ummu_glberr_usi_reg_writell(uint64_t addr) "set glb err usi addr 0x%lx" ++ummu_mapt_ctx_base_reg_writell(uint64_t addr) "config mapt ctx base 0x%lx" ++ + # ub.c + ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx" + ub_update_mappings_add(uint64_t new_addr) "commit region addr to 0x%lx" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index 19829f661a..75ac7659b5 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -47,13 +47,663 @@ UMMUState *ummu_find_by_bus_num(uint8_t bus_num) + return NULL; + } + ++static void ummu_cr0_process_task(UMMUState *u) ++{ ++ u->ctrl0_ack = u->ctrl[0]; ++} ++ ++static uint64_t ummu_mcmdq_reg_readl(UMMUState *u, hwaddr offset) ++{ ++ uint8_t mcmdq_idx; ++ uint64_t val = UINT64_MAX; ++ ++ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK); ++ if (mcmdq_idx >= UMMU_MAX_MCMDQS) { ++ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset); ++ return val; ++ } ++ ++ switch (offset & MCMDQ_BASE_ADDR_MASK) { ++ case MCMDQ_PROD_BASE_ADDR: ++ val = u->mcmdqs[mcmdq_idx].queue.prod; ++ break; ++ case MCMDQ_CONS_BASE_ADDR: ++ val = u->mcmdqs[mcmdq_idx].queue.cons; ++ break; ++ default: ++ qemu_log("ummu cannot handle 32-bit mcmdq reg read access at 0x%lx\n", offset); ++ break; ++ } ++ ++ return val; ++} ++ ++static int ummu_mapt_get_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base) ++{ ++ int ret, i; ++ dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES; ++ ++ ret = dma_memory_read(&address_space_memory, addr, base, sizeof(*base), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("Cannot fetch mapt cmdq ctx at address=0x%lx\n", addr); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(base->word); i++) { ++ le32_to_cpus(&base->word[i]); ++ } ++ ++ return 0; ++} ++ ++static int ummu_mapt_update_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base) ++{ ++ int i; ++ dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES; ++ ++ for (i = 0; i < ARRAY_SIZE(base->word); i++, addr += sizeof(uint32_t)) { ++ uint32_t tmp = cpu_to_le32(base->word[i]); ++ if (dma_memory_write(&address_space_memory, addr, &tmp, ++ sizeof(uint32_t), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("dma failed to write to addr 0x%lx\n", addr); ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++static uint64_t ummu_mapt_ctrlr_page_read_process(UMMUState *u, hwaddr offset) ++{ ++ MAPTCmdqBase base; ++ uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset); ++ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base); ++ int ret; ++ uint64_t val = UINT64_MAX; ++ ++ if (!addr) { ++ /* mapt ctrlr page not init, return default val 0 */ ++ return 0; ++ } ++ ++ ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base); ++ if (ret) { ++ qemu_log("failed to get mapt cmdq base.\n"); ++ return val; ++ } ++ ++ switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) { ++ case UCMDQ_PI: ++ val = ummu_mapt_cmdq_base_get_ucmdq_pi(&base); ++ break; ++ case UCMDQ_CI: ++ val = ummu_mapt_cmdq_base_get_ucmdq_ci(&base); ++ break; ++ 
case UCPLQ_PI: ++ val = ummu_mapt_cmdq_base_get_ucplq_pi(&base); ++ break; ++ case UCPLQ_CI: ++ val = ummu_mapt_cmdq_base_get_ucplq_ci(&base); ++ break; ++ default: ++ qemu_log("cannot process addr(0x%lx) mpat ctrlr page read.\n", offset); ++ return val; ++ } ++ ++ return val; ++} ++ ++static uint64_t ummu_reg_readw(UMMUState *u, hwaddr offset) ++{ ++ uint64_t val = UINT64_MAX; ++ ++ switch (offset) { ++ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG: ++ val = ummu_mapt_ctrlr_page_read_process(u, offset); ++ break; ++ default: ++ qemu_log("ummu cannot handle 16-bit read access at: 0x%lx\n", offset); ++ break; ++ } ++ ++ return val; ++} ++ ++static uint64_t ummu_reg_readl(UMMUState *u, hwaddr offset) ++{ ++ uint64_t val = UINT64_MAX; ++ ++ switch (offset) { ++ case A_CAP0...A_CAP6: ++ val = u->cap[(offset - A_CAP0) / 4]; ++ break; ++ case A_CTRL0: ++ val = u->ctrl[0]; ++ break; ++ case A_CTRL0_ACK: ++ val = u->ctrl0_ack; ++ break; ++ case A_CTRL1: ++ val = u->ctrl[1]; ++ break; ++ case A_CTRL2: ++ val = u->ctrl[2]; ++ break; ++ case A_CTRL3: ++ val = u->ctrl[3]; ++ break; ++ case A_TECT_BASE_CFG: ++ val = u->tect_base_cfg; ++ break; ++ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI: ++ val = ummu_mcmdq_reg_readl(u, offset); ++ break; ++ case A_EVENT_QUE_PI: ++ val = u->eventq.queue.prod; ++ break; ++ case A_EVENT_QUE_CI: ++ val = u->eventq.queue.cons; ++ break; ++ case A_EVENT_QUE_USI_DATA: ++ val = u->eventq.usi_data; ++ break; ++ case A_EVENT_QUE_USI_ATTR: ++ val = u->eventq.usi_attr; ++ break; ++ case A_GLB_INT_EN: ++ val = 0; ++ /* glb err interrupt bit enabled int bit 0 */ ++ if (ummu_glb_err_int_en(u)) { ++ val |= 0x1; ++ } ++ ++ /* event que interrupt bit enabled in bit 1 */ ++ if (ummu_event_que_int_en(u)) { ++ val |= (1 << 1); ++ } ++ break; ++ case A_GLB_ERR: ++ val = u->glb_err.glb_err; ++ break; ++ case A_GLB_ERR_RESP: ++ val = u->glb_err.glb_err_resp; ++ break; ++ case A_GLB_ERR_INT_USI_DATA: ++ val = u->glb_err.usi_data; ++ break; ++ case A_GLB_ERR_INT_USI_ATTR: ++ val = u->glb_err.usi_attr; ++ break; ++ case A_RELEASE_UM_QUEUE_ID: ++ val = u->release_um_queue_id; ++ break; ++ case A_RELEASE_UM_QUEUE: ++ val = u->release_um_queue; ++ break; ++ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG: ++ val = ummu_mapt_ctrlr_page_read_process(u, offset); ++ break; ++ case A_UMCMD_PAGE_SEL: ++ val = u->ucmdq_page_sel; ++ break; ++ case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11: ++ case A_UMMU_MEM_USI_DATA: ++ case A_UMMU_MEM_USI_ATTR: ++ case A_UMMU_INT_MASK: ++ case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG: ++ /* do nothing, reg return val 0 */ ++ val = 0; ++ break; ++ default: ++ qemu_log("ummu cannot handle 32-bit read access at 0x%lx\n", offset); ++ break; ++ } ++ ++ return val; ++} ++ ++static uint64_t ummu_mcmdq_reg_readll(UMMUState *u, hwaddr offset) ++{ ++ uint8_t mcmdq_idx; ++ uint64_t val = UINT64_MAX; ++ ++ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK); ++ if (mcmdq_idx >= UMMU_MAX_MCMDQS) { ++ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset); ++ return val; ++ } ++ ++ switch (offset & MCMDQ_BASE_ADDR_MASK) { ++ case A_MCMD_QUE_BASE: ++ val = u->mcmdqs[mcmdq_idx].queue.base; ++ break; ++ default: ++ qemu_log("ummu cannot handle 64-bit mcmdq reg read access at 0x%lx\n", offset); ++ break; ++ } ++ ++ return val; ++} ++ ++static uint64_t ummu_reg_readll(UMMUState *u, hwaddr offset) ++{ ++ uint64_t val = UINT64_MAX; ++ ++ switch (offset) { ++ case A_TECT_BASE0: ++ val = u->tect_base; ++ break; ++ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI: ++ 
val = ummu_mcmdq_reg_readll(u, offset); ++ break; ++ case A_EVENT_QUE_BASE0: ++ val = u->eventq.queue.base; ++ break; ++ case A_EVENT_QUE_USI_ADDR0: ++ val = u->eventq.usi_addr; ++ break; ++ case A_GLB_ERR_INT_USI_ADDR0: ++ val = u->glb_err.usi_addr; ++ break; ++ case A_MAPT_CMDQ_CTXT_BADDR0: ++ val = u->mapt_cmdq_ctxt_base; ++ break; ++ case A_UMMU_MEM_USI_ADDR0: ++ /* do nothing, reg return val 0 */ ++ val = 0; ++ break; ++ default: ++ qemu_log("ummu cannot handle 64-bit read access at 0x%lx\n", offset); ++ break; ++ } ++ ++ return val; ++} ++ + static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size) + { ++ UMMUState *u = opaque; ++ uint64_t val = UINT64_MAX; ++ ++ switch (size) { ++ case 2: ++ val = ummu_reg_readw(u, offset); ++ break; ++ case 4: ++ val = ummu_reg_readl(u, offset); ++ break; ++ case 8: ++ val = ummu_reg_readll(u, offset); ++ break; ++ default: ++ break; ++ } ++ ++ return val; ++} ++ ++static void mcmdq_process_task(UMMUState *u, uint8_t mcmdq_idx) ++{ ++} ++ ++static void ummu_mcmdq_reg_writel(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ uint8_t mcmdq_idx; ++ UMMUMcmdQueue *q = NULL; ++ ++ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK); ++ if (mcmdq_idx >= UMMU_MAX_MCMDQS) { ++ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset); ++ return; ++ } ++ ++ switch (offset & MCMDQ_BASE_ADDR_MASK) { ++ case MCMDQ_PROD_BASE_ADDR: ++ update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.prod, data, UMMU_MCMDQ_PI_WMASK); ++ mcmdq_process_task(u, mcmdq_idx); ++ break; ++ case MCMDQ_CONS_BASE_ADDR: ++ update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.cons, data, UMMU_MCMDQ_CI_WMASK); ++ break; ++ default: ++ qemu_log("ummu cannot handle 32-bit mcmdq reg write access at 0x%lx\n", offset); ++ break; ++ } ++ ++ q = &u->mcmdqs[mcmdq_idx]; ++ trace_ummu_mcmdq_reg_writel(mcmdq_idx, MCMD_QUE_WD_IDX(&q->queue), MCMD_QUE_RD_IDX(&q->queue)); ++} ++ ++static void ummu_glb_int_en_process(UMMUState *u, uint64_t data) ++{ ++} ++ ++static MemTxResult ummu_mapt_cmdq_fetch_cmd(MAPTCmdqBase *base, MAPTCmd *cmd) ++{ ++ dma_addr_t base_addr = MAPT_UCMDQ_BASE_ADDR(base); ++ dma_addr_t addr = base_addr + MAPT_UCMDQ_CI(base) * sizeof(*cmd); ++ int ret, i; ++ ++ ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(*cmd), ++ MEMTXATTRS_UNSPECIFIED); ++ if (ret != MEMTX_OK) { ++ qemu_log("addr 0x%lx failed to fectch mapt ucmdq cmd.\n", addr); ++ return ret; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(cmd->word); i++) { ++ le32_to_cpus(&cmd->word[i]); ++ } ++ ++ return ret; ++} ++ ++static void ummu_mapt_cplq_add_entry(MAPTCmdqBase *base, MAPTCmdCpl *cpl) ++{ ++ dma_addr_t base_addr = MAPT_UCPLQ_BASE_ADDR(base); ++ dma_addr_t addr = base_addr + MAPT_UCPLQ_PI(base) * sizeof(*cpl); ++ uint32_t tmp = cpu_to_le32(*(uint32_t *)cpl); ++ ++ if (dma_memory_write(&address_space_memory, addr, &tmp, ++ sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) { ++ qemu_log("dma failed to wirte cpl entry to addr 0x%lx\n", addr); ++ } ++} ++ ++static void ummu_process_mapt_cmd(UMMUState *u, MAPTCmdqBase *base, MAPTCmd *cmd, uint32_t ci) ++{ ++ uint32_t type = MAPT_UCMD_TYPE(cmd); ++ MAPTCmdCpl cpl; ++ uint16_t tecte_tag; ++ uint32_t tid; ++ ++ /* default set cpl staus invalid */ ++ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_INVALID, 0); ++ tecte_tag = ummu_mapt_cmdq_base_get_tecte_tag(base); ++ tid = ummu_mapt_cmdq_base_get_token_id(base); ++ qemu_log("tid: %u, tecte_tag: %u\n", tid, tecte_tag); ++ switch (type) { ++ case MAPT_UCMD_TYPE_PSYNC: ++ qemu_log("start process 
mapt cmd: MAPT_UCMD_TYPE_PSYNC.\n"); ++ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_PSYNC_SUCCESS, ci); ++ break; ++ case MAPT_UCMD_TYPE_PLBI_USR_ALL: ++ qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_ALL.\n"); ++ break; ++ case MAPT_UCMD_TYPE_PLBI_USR_VA: ++ qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_VA.\n"); ++ break; ++ default: ++ qemu_log("unknown mapt cmd type: 0x%x\n", type); ++ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_TYPE_ERROR, ci); ++ break; ++ } ++ ++ if (cpl.cpl_status == MAPT_UCPL_STATUS_INVALID) { ++ return; ++ } ++ ++ if (ummu_mapt_ucplq_full(base)) { ++ qemu_log("mapt ucplq full, failed to add cpl entry.\n"); ++ return; ++ } ++ ummu_mapt_cplq_add_entry(base, &cpl); ++ ummu_mapt_ucqlq_prod_incr(base); ++ qemu_log("mapt cplq add entry success, cplpi: %u, cplci: %u.\n", ++ MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base)); ++} ++ ++static void ummu_process_mapt_cmdq(UMMUState *u, MAPTCmdqBase *base) ++{ ++ MAPTCmd cmd; ++ int ret; ++ ++ while (!ummu_mapt_ucmdq_empty(base)) { ++ ret = ummu_mapt_cmdq_fetch_cmd(base, &cmd); ++ if (ret) { ++ qemu_log("failed to fetch matp cmdq cmd.\n"); ++ return; ++ } ++ ummu_process_mapt_cmd(u, base, &cmd, MAPT_UCMDQ_CI(base)); ++ ummu_mapt_ucmdq_cons_incr(base); ++ } ++ qemu_log("after cmdq process, log2size: %u, cmdpi: %u, cmdci: %u, cplpi: %u, cplci: %u\n", ++ MAPT_UCMDQ_LOG2SIZE(base), MAPT_UCMDQ_PI(base), MAPT_UCMDQ_CI(base), ++ MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base)); ++} ++ ++static void ummu_mapt_ctrlr_page_write_process(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ MAPTCmdqBase base; ++ uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset); ++ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base); ++ int ret; ++ ++ qemu_log("qid: %u, mapt_ctxt_base: 0x%lx\n", qid, addr); ++ ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base); ++ if (ret) { ++ qemu_log("failed to get mapt cmdq base.\n"); ++ return; ++ } ++ ++ switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) { ++ case UCMDQ_PI: ++ ummu_mapt_cmdq_base_update_ucmdq_pi(&base, (uint16_t)data); ++ ummu_process_mapt_cmdq(u, &base); ++ break; ++ case UCMDQ_CI: ++ ummu_mapt_cmdq_base_update_ucmdq_ci(&base, (uint16_t)data); ++ break; ++ case UCPLQ_PI: ++ ummu_mapt_cmdq_base_update_ucplq_pi(&base, (uint16_t)data); ++ break; ++ case UCPLQ_CI: ++ ummu_mapt_cmdq_base_update_ucplq_ci(&base, (uint16_t)data); ++ break; ++ default: ++ qemu_log("cannot process addr(0x%lx) mpat ctrlr page write.\n", offset); ++ return; ++ } ++ ++ ret = ummu_mapt_update_cmdq_base(u, addr, qid, &base); ++ if (ret) { ++ qemu_log("failed to update mapt cmdq ctx.\n"); ++ return; ++ } ++} ++ ++static void ummu_reg_writew(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ switch (offset) { ++ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG: ++ ummu_mapt_ctrlr_page_write_process(u, offset, data); ++ break; ++ default: ++ qemu_log("ummu cannot handle 16-bit write access at: 0x%lx\n", offset); ++ break; ++ } ++} ++ ++static int ummu_mapt_process_release_um_queue(UMMUState *u) ++{ ++ MAPTCmdqBase base; ++ uint32_t qid = u->release_um_queue_id; ++ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base); ++ ++ memset(&base, 0, sizeof(base)); ++ if (ummu_mapt_update_cmdq_base(u, addr, qid, &base)) { ++ qemu_log("failed to release um queue(qid: %u)\n", qid); ++ return -1; ++ } ++ ++ qemu_log("release um queue(qid: %u) success.\n", qid); + return 0; + } + ++static void ummu_reg_writel(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ switch (offset) { ++ case A_CTRL0: ++ 
update_reg32_by_wmask(&u->ctrl[0], data, UMMU_CTRL0_WMASK); ++ ummu_cr0_process_task(u); ++ break; ++ case A_CTRL1: ++ update_reg32_by_wmask(&u->ctrl[1], data, UMMU_CTRL1_WMASK); ++ break; ++ case A_CTRL2: ++ update_reg32_by_wmask(&u->ctrl[2], data, UMMU_CTRL2_WMASK); ++ break; ++ case A_CTRL3: ++ update_reg32_by_wmask(&u->ctrl[3], data, UMMU_CTRL3_WMASK); ++ break; ++ case A_TECT_BASE_CFG: ++ update_reg32_by_wmask(&u->tect_base_cfg, data, UMMU_TECT_BASE_CFG_WMASK); ++ break; ++ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI: ++ ummu_mcmdq_reg_writel(u, offset, data); ++ break; ++ case A_EVENT_QUE_PI: ++ update_reg32_by_wmask(&u->eventq.queue.prod, data, UMMU_EVENTQ_PI_WMASK); ++ break; ++ case A_EVENT_QUE_CI: ++ update_reg32_by_wmask(&u->eventq.queue.cons, data, UMMU_EVENTQ_CI_WMASK); ++ break; ++ case A_EVENT_QUE_USI_DATA: ++ update_reg32_by_wmask(&u->eventq.usi_data, data, UMMU_EVENT_QUE_USI_DATA_WMASK); ++ break; ++ case A_EVENT_QUE_USI_ATTR: ++ update_reg32_by_wmask(&u->eventq.usi_attr, data, UMMU_EVENTQ_USI_ATTR_WMASK); ++ break; ++ case A_GLB_ERR_INT_USI_DATA: ++ update_reg32_by_wmask(&u->glb_err.usi_data, data, UMMU_GLB_ERR_INT_USI_DATA_WMASK); ++ break; ++ case A_GLB_ERR_INT_USI_ATTR: ++ update_reg32_by_wmask(&u->glb_err.usi_attr, data, UMMU_GLB_ERR_INT_USI_ATTR_WMASK); ++ break; ++ case A_GLB_INT_EN: ++ ummu_glb_int_en_process(u, data); ++ break; ++ case A_GLB_ERR_RESP: ++ update_reg32_by_wmask(&u->glb_err.glb_err_resp, data, UMMU_GLB_ERR_RESP_WMASK); ++ break; ++ case A_RELEASE_UM_QUEUE: ++ /* release_um_queue reg set 1 to release um_queue */ ++ if ((data & RELEASE_UM_QUEUE_WMASK) != 1) { ++ break; ++ } ++ if (ummu_mapt_process_release_um_queue(u)) { ++ u->release_um_queue = 1; ++ break; ++ } ++ /* release success, set release_um_queue reg to 0, means release success */ ++ u->release_um_queue = 0; ++ break; ++ case A_RELEASE_UM_QUEUE_ID: ++ update_reg32_by_wmask(&u->release_um_queue_id, data, RELEASE_UM_QUEUE_ID_WMASK); ++ break; ++ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG: ++ ummu_mapt_ctrlr_page_write_process(u, offset, data); ++ break; ++ case A_UMCMD_PAGE_SEL: ++ qemu_log("ucmdq set page sel to %s\n", ++ data == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K ? 
"4K" : "64K"); ++ update_reg32_by_wmask(&u->ucmdq_page_sel, data, UMCMD_PAGE_SEL_WMASK); ++ break; ++ case A_DSTEID_KV_TABLE_BASE_CFG: ++ case A_UMMU_DSTEID_KV_TABLE_HASH_CFG0: ++ case A_UMMU_DSTEID_KV_TABLE_HASH_CFG1: ++ case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11: ++ case A_UMMU_MEM_USI_DATA: ++ case A_UMMU_MEM_USI_ATTR: ++ case A_UMMU_INT_MASK: ++ case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG: ++ /* do nothing */ ++ break; ++ default: ++ qemu_log("ummu cannot handle 32-bit write access at 0x%lx\n", offset); ++ break; ++ } ++} ++ ++static void ummu_mcmdq_reg_writell(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ uint8_t mcmdq_idx; ++ ++ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK); ++ if (mcmdq_idx >= UMMU_MAX_MCMDQS) { ++ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset); ++ return; ++ } ++ ++ switch (offset & MCMDQ_BASE_ADDR_MASK) { ++ case A_MCMD_QUE_BASE: ++ update_reg64_by_wmask(&u->mcmdqs[mcmdq_idx].queue.base, data, UMMU_MCMDQ_BASE_WMASK); ++ u->mcmdqs[mcmdq_idx].queue.log2size = MCMD_QUE_LOG2SIZE(data); ++ trace_ummu_mcmdq_base_reg_writell(mcmdq_idx, u->mcmdqs[mcmdq_idx].queue.base, ++ u->mcmdqs[mcmdq_idx].queue.log2size); ++ break; ++ default: ++ qemu_log("ummu cannot handle 64-bit mcmdq reg write access at 0x%lx\n", offset); ++ break; ++ } ++} ++ ++static void ummu_reg_writell(UMMUState *u, hwaddr offset, uint64_t data) ++{ ++ switch (offset) { ++ case A_TECT_BASE0: ++ update_reg64_by_wmask(&u->tect_base, data, UMMU_TECT_BASE_WMASK); ++ break; ++ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI: ++ ummu_mcmdq_reg_writell(u, offset, data); ++ break; ++ case A_EVENT_QUE_BASE0: ++ update_reg64_by_wmask(&u->eventq.queue.base, data, UMMU_EVENTQ_BASE_WMASK); ++ u->eventq.queue.log2size = EVENT_QUE_LOG2SIZE(data); ++ trace_ummu_eventq_req_writell(u->eventq.queue.base, u->eventq.queue.log2size); ++ break; ++ case A_EVENT_QUE_USI_ADDR0: ++ update_reg64_by_wmask(&u->eventq.usi_addr, data, UMMU_EVENTQ_USI_ADDR_WMASK); ++ trace_ummu_eventq_usi_reg_writell(data); ++ break; ++ case A_GLB_ERR_INT_USI_ADDR0: ++ update_reg64_by_wmask(&u->glb_err.usi_addr, data, UMMU_GLB_ERR_INT_USI_ADDR_WMASK); ++ trace_ummu_glberr_usi_reg_writell(data); ++ break; ++ case A_MAPT_CMDQ_CTXT_BADDR0: ++ update_reg64_by_wmask(&u->mapt_cmdq_ctxt_base, data, MAPT_CMDQ_CTXT_BADDR_WMASK); ++ trace_ummu_mapt_ctx_base_reg_writell(u->mapt_cmdq_ctxt_base); ++ break; ++ case A_DSTEID_KV_TABLE_BASE0: ++ case A_UMMU_DSTEID_CAM_TABLE_BASE0: ++ case A_UMMU_MEM_USI_ADDR0: ++ /* do nothing */ ++ break; ++ default: ++ qemu_log("ummu cannot handle 64-bit write access at 0x%lx\n", offset); ++ break; ++ } ++} ++ + static void ummu_reg_write(void *opaque, hwaddr offset, uint64_t data, unsigned size) + { ++ UMMUState *u = opaque; ++ ++ switch (size) { ++ case 2: ++ ummu_reg_writew(u, offset, data); ++ break; ++ case 4: ++ ummu_reg_writel(u, offset, data); ++ break; ++ case 8: ++ ummu_reg_writell(u, offset, data); ++ break; ++ default: ++ qemu_log("cann't process ummu reg write for size: %u\n", size); ++ break; ++ } + } + + static const MemoryRegionOps ummu_reg_ops = { +-- +2.33.0 + diff --git a/ub-ummu-supprot-create-kvtbl-and-del-kvtbl-mcmdq-pro.patch b/ub-ummu-supprot-create-kvtbl-and-del-kvtbl-mcmdq-pro.patch new file mode 100644 index 0000000000000000000000000000000000000000..111762b4f65083d446c4d67cf145f1c60b7ba0d6 --- /dev/null +++ b/ub-ummu-supprot-create-kvtbl-and-del-kvtbl-mcmdq-pro.patch @@ -0,0 +1,128 @@ +From 8341002ceb47075797719986bab3e2517aee9779 Mon Sep 17 00:00:00 2001 +From: 
caojinhuahw +Date: Thu, 13 Nov 2025 16:32:56 +0800 +Subject: [PATCH 5/6] ub: ummu supprot create kvtbl and del kvtbl mcmdq process + +1. realize mcmdq kvtbl create cmd +2. realize mcmdq kvtbl delete cmd + +Signed-off-by: caojinhuahw +--- + hw/ub/trace-events | 2 ++ + hw/ub/ub_ummu.c | 61 ++++++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 61 insertions(+), 2 deletions(-) + +diff --git a/hw/ub/trace-events b/hw/ub/trace-events +index bcecd2ad67..ac55f5e406 100644 +--- a/hw/ub/trace-events ++++ b/hw/ub/trace-events +@@ -5,6 +5,8 @@ ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process + mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s" + mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)" + mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)" ++mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)" ++mcmdq_cmd_delete_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid) "CMD_DELETE_KVTBL: mcmdq_idx(%u) dest_eid(%u)" + mcmdq_cmd_null(uint32_t mcmdq_idx, uint64_t addr, void *hva, uint64_t size, uint64_t rb_size) "CMD_NULL: mcmdq_idx(%u) addr(0x%lx) hva(%p) size(0x%lx) rb_size(0x%lx)" + ummu_mcmdq_base_reg_writell(uint8_t idx, uint64_t base, uint8_t log2size) "idx(%u) base(0x%lx) log2size(0x%x)" + ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0x%x)" +diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c +index 87a6bfb075..d610c7d9e4 100644 +--- a/hw/ub/ub_ummu.c ++++ b/hw/ub/ub_ummu.c +@@ -379,6 +379,55 @@ static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size) + return val; + } + ++static void mcmdq_cmd_create_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ UMMUKVTblEntry *entry = NULL; ++ uint32_t dst_eid = CMD_CREATE_KVTBL_DEST_EID(cmd); ++ uint32_t tecte_tag = CMD_CREATE_KVTBL_TECTE_TAG(cmd); ++ ++ trace_mcmdq_cmd_create_kvtbl(mcmdq_idx, dst_eid, tecte_tag); ++ ++ QLIST_FOREACH(entry, &u->kvtbl, list) { ++ if (entry->dst_eid == dst_eid) { ++ qemu_log("update kvtlb dst_eid(0x%x) tecte_tag from 0x%x to 0x%x\n", ++ dst_eid, entry->tecte_tag, tecte_tag); ++ entry->tecte_tag = tecte_tag; ++ return; ++ } ++ } ++ ++ entry = g_malloc(sizeof(UMMUKVTblEntry)); ++ if (!entry) { ++ qemu_log("failed to malloc for kvtbl entry for dst_eid(0x%x)\n", dst_eid); ++ return; ++ } ++ ++ entry->dst_eid = dst_eid; ++ entry->tecte_tag = tecte_tag; ++ QLIST_INSERT_HEAD(&u->kvtbl, entry, list); ++} ++ ++static void mcmdq_cmd_delete_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) ++{ ++ UMMUKVTblEntry *entry = NULL; ++ uint32_t dst_eid = CMD_DELETE_KVTBL_DEST_EID(cmd); ++ ++ trace_mcmdq_cmd_delete_kvtbl(mcmdq_idx, dst_eid); ++ ++ QLIST_FOREACH(entry, &u->kvtbl, list) { ++ if (entry->dst_eid == dst_eid) { ++ break; ++ } ++ } ++ ++ if (entry) { ++ QLIST_REMOVE(entry, list); ++ g_free(entry); ++ } else { ++ qemu_log("cannot find dst_eid(0x%x) entry in kvtbl.\n", dst_eid); ++ } ++} ++ + static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) + { + trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]); +@@ -476,8 +525,8 @@ static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcm + [CMD_TLBI_S2_IPA] = mcmdq_cmd_tlbi_x_process, + [CMD_TLBI_NS_OS_ALL] = mcmdq_cmd_tlbi_x_process, + [CMD_RESUME] = NULL, +- [CMD_CREATE_KVTBL] = NULL, +- 
[CMD_DELETE_KVTBL] = NULL,
++    [CMD_CREATE_KVTBL] = mcmdq_cmd_create_kvtbl,
++    [CMD_DELETE_KVTBL] = mcmdq_cmd_delete_kvtbl,
+     [CMD_NULL] = mcmdq_cmd_null,
+     [CMD_TLBI_OS_ALL_U] = NULL,
+     [CMD_TLBI_OS_ASID_U] = NULL,
+@@ -1068,18 +1117,26 @@ static void ummu_base_realize(DeviceState *dev, Error **errp)
+     sysbus_init_mmio(sysdev, &u->ummu_reg_mem);
+     ummu_registers_init(u);
+     ub_save_ummu_list(u);
++
++    QLIST_INIT(&u->kvtbl);
+ }
+ 
+ static void ummu_base_unrealize(DeviceState *dev)
+ {
+     UMMUState *u = UB_UMMU(dev);
+     SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
++    UMMUKVTblEntry *entry = NULL;
++    UMMUKVTblEntry *next_entry = NULL;
+ 
+     ub_remove_ummu_list(u);
+     if (sysdev->parent_obj.id) {
+         g_free(sysdev->parent_obj.id);
+     }
+ 
++    QLIST_FOREACH_SAFE(entry, &u->kvtbl, list, next_entry) {
++        QLIST_REMOVE(entry, list);
++        g_free(entry);
++    }
+ }
+ 
+ static void ummu_base_reset(DeviceState *dev)
+-- 
+2.33.0
+
diff --git a/ub-use-ummu-and-init-ummu-registers.patch b/ub-use-ummu-and-init-ummu-registers.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fa89c049966532aaa0426dc46f84cf6fcfaaad10
--- /dev/null
+++ b/ub-use-ummu-and-init-ummu-registers.patch
@@ -0,0 +1,293 @@
+From 5492631d181a00760ecf1e6f98fa3b28a8d5e72e Mon Sep 17 00:00:00 2001
+From: caojinhuahw
+Date: Thu, 13 Nov 2025 10:40:33 +0800
+Subject: [PATCH 2/6] ub: use ummu and init ummu registers
+
+1. init ummu registers
+2. create ummu device at machine init
+
+Signed-off-by: caojinhuahw
+---
+ hw/arm/virt.c           |  15 ++++
+ hw/ub/ub_ubc.c          |   4 +
+ hw/ub/ub_ummu.c         | 189 ++++++++++++++++++++++++++++++++++++++++
+ include/hw/ub/ub_ummu.h |   2 +
+ 4 files changed, 210 insertions(+)
+
+diff --git a/hw/arm/virt.c b/hw/arm/virt.c
+index de914a9136..2c8c8df07c 100644
+--- a/hw/arm/virt.c
++++ b/hw/arm/virt.c
+@@ -1755,8 +1755,10 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
+ static void create_ub(VirtMachineState *vms)
+ {
+     DeviceState *ubc;
++    DeviceState *ummu;
+     MemoryRegion *mmio_reg;
+     MemoryRegion *mmio_alias;
++    BusControllerState *ubc_state;
+ 
+     if (ub_cfg_addr_map_table_init() < 0) {
+         qemu_log("failed to init ub cfg addr map table\n");
+@@ -1795,6 +1797,19 @@ static void create_ub(VirtMachineState *vms)
+                              vms->memmap[VIRT_UB_IDEV_ERS].size);
+     memory_region_add_subregion(get_system_memory(),
+                                 vms->memmap[VIRT_UB_IDEV_ERS].base, mmio_alias);
++    if (vms->ummu) {
++        ummu = qdev_new(TYPE_UB_UMMU);
++        ubc_state = BUS_CONTROLLER(ubc);
++        object_property_set_link(OBJECT(ummu), "primary-bus", OBJECT(ubc_state->bus), &error_abort);
++        /* default: set ummu nested */
++        object_property_set_bool(OBJECT(ummu), "nested", true, &error_abort);
++        qdev_prop_set_uint64(ummu, "ub-ummu-reg-size", UMMU_REG_SIZE);
++        sysbus_realize_and_unref(SYS_BUS_DEVICE(ummu), &error_fatal);
++        sysbus_mmio_map(SYS_BUS_DEVICE(ummu), 0,
++                        vms->memmap[VIRT_UBC_BASE_REG].base + UMMU_REG_OFFSET);
++    } else {
++        qemu_log("ummu disabled.\n");
++    }
+ }
+ #endif // CONFIG_UB
+ static void create_pcie(VirtMachineState *vms)
+diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c
+index 6d2441f380..0d5a31a22a 100644
+--- a/hw/ub/ub_ubc.c
++++ b/hw/ub/ub_ubc.c
+@@ -26,6 +26,7 @@
+ #include "hw/ub/ub.h"
+ #include "hw/ub/ub_bus.h"
+ #include "hw/ub/ub_ubc.h"
++#include "hw/ub/ub_ummu.h"
+ #include "hw/ub/ub_config.h"
+ #include "hw/ub/hisi/ubc.h"
+ #include "hw/ub/hisi/ub_mem.h"
+@@ -433,6 +434,9 @@ static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp)
+ 
+     dev->dev_type = UB_TYPE_IBUS_CONTROLLER;
+     ub_bus_controller_dev_config_space_init(dev);
++    if (0 > ummu_associating_with_ubc(ubc)) {
++        qemu_log("failed to associate ubc with ummu: %s\n", dev->name);
++    }
+ }
+ 
+ static Property ub_bus_controller_dev_properties[] = {
+diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c
+index 8598e2272c..19829f661a 100644
+--- a/hw/ub/ub_ummu.c
++++ b/hw/ub/ub_ummu.c
+@@ -35,12 +35,201 @@
+ #include "qemu/error-report.h"
+ #include "trace.h"
+ 
++QLIST_HEAD(, UMMUState) ub_umms;
++UMMUState *ummu_find_by_bus_num(uint8_t bus_num)
++{
++    UMMUState *ummu;
++    QLIST_FOREACH(ummu, &ub_umms, node) {
++        if (ummu->bus_num == bus_num) {
++            return ummu;
++        }
++    }
++    return NULL;
++}
++
++static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size)
++{
++    return 0;
++}
++
++static void ummu_reg_write(void *opaque, hwaddr offset, uint64_t data, unsigned size)
++{
++}
++
++static const MemoryRegionOps ummu_reg_ops = {
++    .read = ummu_reg_read,
++    .write = ummu_reg_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .valid = {
++        .min_access_size = 2,
++        .max_access_size = 8,
++    },
++    .impl = {
++        .min_access_size = 2,
++        .max_access_size = 8,
++    },
++};
++
++static void ummu_registers_init(UMMUState *u)
++{
++    int i;
++
++    memset(u->cap, 0, sizeof(u->cap));
++    /* cap 0 init */
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, DSTEID_SIZE, 0x10);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TOKENID_SIZE, 0x14);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_PERMS_OVR, 0x1);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_TYPES_OVR, 0x1);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, S2_ATTR_TYPE, 0x1);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TCT_LEVEL, 0x1);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_MODE, 0x1);
++    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_LEVEL, 0x1);
++    /* cap 1 init */
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SIZE, 0x13);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_NUMB, 0x0);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SUPPORT, 0x1);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SIZE, 0xF);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_NUMB, 0x3);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SUPPORT, 0x1);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENT_GEN, 0x1);
++    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, STALL_MAX, 0x80);
++    /* cap 2 init */
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VMID_TLBI, 0x0);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TLB_BOARDCAST, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, RANGE_TLBI, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, OA_SIZE, 0x5);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN4K_T, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN16K_T, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN64K_T, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VA_EXTEND, 0x0);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S2_TRANS, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S1_TRANS, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, SMALL_TRANS, 0x1);
++    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TRANS_FORM, 0x2);
++    /* cap 3 init */
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HIER_ATTR_DISABLE, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, S2_EXEC_NEVER_CTRL, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, BBM_LEVEL, 0x2);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, COHERENT_ACCESS, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TTENDIAN_MODE, 0x0);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, MTM_SUPPORT, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HTTU_SUPPORT, 0x2);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HYP_S1CONTEXT, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, USI_SUPPORT, 0x1);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, STALL_MODEL, 0x0);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TERM_MODEL, 0x0);
++    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, SATI_MAX, 0x1);
++    /* cap 4 init */
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_UCPLQ_NUMB, 0x10);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_SIZE, 0xF);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCPLQ_SIZE, 0xF);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SIZE, 0xF);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_NUMB, 0x5);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SUPPORT, 0x1);
++    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, PPLB_SUPPORT, 0x0);
++
++    /* cap 5 init */
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_SUPPORT, 0x1);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_MODE, 0x3);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN2M_P, 0x0);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN4K_P, 0x1);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK, 0x1);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK_MODE, 0x1);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, RANGE_PLBI, 0x1);
++    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, PLB_BORDCAST, 0x0);
++    /* cap 6 init */
++    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_ID_MAX, 0x00FF);
++    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_GP_MAX, 0x03);
++
++    /* ctrlr init */
++    memset(u->ctrl, 0, sizeof(u->ctrl));
++    u->ctrl[1] = FIELD_DP32(u->ctrl[1], CTRL1, TECT_MODE_SEL, 0x1);
++
++    /* tect init */
++    u->tect_base = 0;
++    u->tect_base_cfg = 0;
++
++    /* mcmdq init */
++    for (i = 0; i < UMMU_MAX_MCMDQS; i++) {
++        u->mcmdqs[i].queue.base = 0;
++        u->mcmdqs[i].queue.prod = 0;
++        u->mcmdqs[i].queue.cons = 0;
++        u->mcmdqs[i].queue.entry_size = sizeof(UMMUMcmdqCmd);
++    }
++
++    /* eventq init */
++    memset(&u->eventq, 0, sizeof(u->eventq));
++
++    /* glb err init */
++    memset(&u->glb_err, 0, sizeof(u->glb_err));
++
++    /* evt queue init */
++    u->eventq.queue.base = 0;
++    u->eventq.queue.prod = 0;
++    u->eventq.queue.cons = 0;
++    u->eventq.queue.entry_size = sizeof(UMMUEvent);
++
++    /* mapt cmdq ctxt base addr init */
++    u->mapt_cmdq_ctxt_base = 0;
++
++    /* umcmdq default page set to 4K */
++    u->ucmdq_page_sel = MAPT_CMDQ_CTRLR_PAGE_SIZE_4K;
++}
++
++int ummu_associating_with_ubc(BusControllerState *ubc)
++{
++    UMMUState *ummu;
++    unsigned int bus_num;
++
++    if (1 != sscanf(ubc->bus->qbus.name, "ubus.%u", &bus_num)) {
++        qemu_log("failed to get bus num %s\n",
++                 ubc->bus->qbus.name);
++        return -1;
++    }
++    ummu = ummu_find_by_bus_num(bus_num);
++    if (!ummu) {
++        qemu_log("failed to get ummu %u\n", bus_num);
++        return -1;
++    }
++    return 0;
++}
++
++static void ub_save_ummu_list(UMMUState *u)
++{
++    QLIST_INSERT_HEAD(&ub_umms, u, node);
++}
++
++static void ub_remove_ummu_list(UMMUState *u)
++{
++    QLIST_REMOVE(u, node);
++}
++
+ static void ummu_base_realize(DeviceState *dev, Error **errp)
+ {
++    static uint8_t NO = 0;
++    UMMUState *u = UB_UMMU(dev);
++    SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
++
++    u->bus_num = NO;
++    sysdev->parent_obj.id = g_strdup_printf("ummu.%u", NO++);
++
++    memory_region_init_io(&u->ummu_reg_mem, OBJECT(u), &ummu_reg_ops,
++                          u, TYPE_UB_UMMU, u->ummu_reg_size);
++    sysbus_init_mmio(sysdev, &u->ummu_reg_mem);
++    ummu_registers_init(u);
++    ub_save_ummu_list(u);
+ }
+ 
+ static void ummu_base_unrealize(DeviceState *dev)
+ {
++    UMMUState *u = UB_UMMU(dev);
++    SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
++
++    ub_remove_ummu_list(u);
++    if (sysdev->parent_obj.id) {
++        g_free(sysdev->parent_obj.id);
++    }
++
+ }
+ 
+ static void ummu_base_reset(DeviceState *dev)
+diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h
+index 262b3d6ec2..fc33fd5549 100644
+--- a/include/hw/ub/ub_ummu.h
++++ b/include/hw/ub/ub_ummu.h
+@@ -119,4 +119,6 @@ struct UMMUBaseClass {
+     SysBusDeviceClass parent_class;
+ };
+ 
++UMMUState *ummu_find_by_bus_num(uint8_t bus_num);
++int ummu_associating_with_ubc(BusControllerState *ubc);
+ #endif
+-- 
+2.33.0
+
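Note: ummu_reg_read() in the patch above is still a stub that returns 0 for every
offset; only the internal cap[] state is initialised by ummu_registers_init().
The sketch below is purely illustrative of how a follow-up change might back the
read path with that state. It is not part of the series: UMMU_REG_CAP0 and the
assumption that CAP0..CAP6 sit at consecutive 32-bit offsets are hypothetical.

/*
 * Illustrative sketch only. UMMU_REG_CAP0 and the CAP register layout
 * (consecutive 32-bit registers) are assumptions, not defined by this series.
 */
static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    UMMUState *u = opaque;

    /* Serve CAP0..CAP6 from the values filled in by ummu_registers_init(). */
    if (offset >= UMMU_REG_CAP0 &&
        offset < UMMU_REG_CAP0 + sizeof(u->cap)) {
        return u->cap[(offset - UMMU_REG_CAP0) / sizeof(uint32_t)];
    }

    /* Every other offset still reads as zero, as in the stub above. */
    return 0;
}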