From b16b10f28c158db74539138d9a9acff8ced5a2f5 Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Tue, 6 Jan 2026 16:42:15 +0800 Subject: [PATCH 01/75] anolis: Revert "perf arm-spe: Support metadata version 2" ANBZ: #25252 This reverts commit b08d9241591df058c70b46830c7ce0e6ca0e14bf. The original commit backported upstream commit 7842a4b6ff698768ccdb13324c3902a069b5d5dd to Anolis with a mistake. This causes the following build error for perf: util/arm-spe.c:1558:1: error: label 'err_free' defined but not used [-Werror=unused-label] 1558 | err_free: | ^~~~~~~~ Signed-off-by: Wei Chen --- tools/perf/util/arm-spe.c | 99 ++------------------------------------- 1 file changed, 4 insertions(+), 95 deletions(-) diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 51e8b68fd66e..1c4cf1423470 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -79,10 +79,6 @@ struct arm_spe { unsigned long num_events; u8 use_ctx_pkt_for_pid; - - u64 **metadata; - u64 metadata_ver; - u64 metadata_nr_cpu; bool is_homogeneous; }; @@ -1091,73 +1087,6 @@ static int arm_spe_flush(struct perf_session *session __maybe_unused, return 0; } -static u64 *arm_spe__alloc_per_cpu_metadata(u64 *buf, int per_cpu_size) -{ - u64 *metadata; - - metadata = zalloc(per_cpu_size); - if (!metadata) - return NULL; - - memcpy(metadata, buf, per_cpu_size); - return metadata; -} - -static void arm_spe__free_metadata(u64 **metadata, int nr_cpu) -{ - int i; - - for (i = 0; i < nr_cpu; i++) - zfree(&metadata[i]); - free(metadata); -} - -static u64 **arm_spe__alloc_metadata(struct perf_record_auxtrace_info *info, - u64 *ver, int *nr_cpu) -{ - u64 *ptr = (u64 *)info->priv; - u64 metadata_size; - u64 **metadata = NULL; - int hdr_sz, per_cpu_sz, i; - - metadata_size = info->header.size - - sizeof(struct perf_record_auxtrace_info); - - /* Metadata version 1 */ - if (metadata_size == ARM_SPE_AUXTRACE_V1_PRIV_SIZE) { - *ver = 1; - *nr_cpu = 0; - /* No per CPU metadata */ - return NULL; - } - - *ver = ptr[ARM_SPE_HEADER_VERSION]; - hdr_sz = ptr[ARM_SPE_HEADER_SIZE]; - *nr_cpu = ptr[ARM_SPE_CPUS_NUM]; - - metadata = calloc(*nr_cpu, sizeof(*metadata)); - if (!metadata) - return NULL; - - /* Locate the start address of per CPU metadata */ - ptr += hdr_sz; - per_cpu_sz = (metadata_size - (hdr_sz * sizeof(u64))) / (*nr_cpu); - - for (i = 0; i < *nr_cpu; i++) { - metadata[i] = arm_spe__alloc_per_cpu_metadata(ptr, per_cpu_sz); - if (!metadata[i]) - goto err_per_cpu_metadata; - - ptr += per_cpu_sz / sizeof(u64); - } - - return metadata; - -err_per_cpu_metadata: - arm_spe__free_metadata(metadata, *nr_cpu); - return NULL; -} - static void arm_spe_free_queue(void *priv) { struct arm_spe_queue *speq = priv; @@ -1192,7 +1121,6 @@ static void arm_spe_free(struct perf_session *session) auxtrace_heap__free(&spe->heap); arm_spe_free_events(session); session->auxtrace = NULL; - arm_spe__free_metadata(spe->metadata, spe->metadata_nr_cpu); free(spe); } @@ -1461,41 +1389,24 @@ int arm_spe_process_auxtrace_info(union perf_event *event, size_t min_sz = ARM_SPE_AUXTRACE_V1_PRIV_SIZE; struct perf_record_time_conv *tc = &session->time_conv; struct arm_spe *spe; - u64 **metadata = NULL; - u64 metadata_ver; - int nr_cpu, err; + int err; if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + min_sz) return -EINVAL; - metadata = arm_spe__alloc_metadata(auxtrace_info, &metadata_ver, - &nr_cpu); - if (!metadata && metadata_ver != 1) { - pr_err("Failed to parse Arm SPE metadata.\n"); - return -EINVAL; - } - spe = 
zalloc(sizeof(struct arm_spe)); if (!spe) return -ENOMEM; err = auxtrace_queues__init(&spe->queues); - if (!spe) { - err = -ENOMEM; - goto err_free_metadata; - } + if (err) + goto err_free; spe->session = session; spe->machine = &session->machines.host; /* No kvm support */ spe->auxtrace_type = auxtrace_info->type; - if (metadata_ver == 1) - spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE]; - else - spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2]; - spe->metadata = metadata; - spe->metadata_ver = metadata_ver; - spe->metadata_nr_cpu = nr_cpu; + spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE]; spe->is_homogeneous = arm_spe__is_homogeneous(metadata, nr_cpu); spe->timeless_decoding = arm_spe__is_timeless_decoding(spe); @@ -1557,7 +1468,5 @@ int arm_spe_process_auxtrace_info(union perf_event *event, session->auxtrace = NULL; err_free: free(spe); -err_free_metadata: - arm_spe__free_metadata(metadata, nr_cpu); return err; } -- Gitee From 5cfd67ea8183cd039965517096ea511ed2d735dd Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Thu, 3 Oct 2024 19:43:01 +0100 Subject: [PATCH 02/75] perf arm-spe: Support metadata version 2 ANBZ: #25252 commit 7842a4b6ff698768ccdb13324c3902a069b5d5dd upstream. This commit adds support for metadata version 2 while remaining backward compatible with the version 1 format. Metadata version 1 doesn't include the ARM_SPE_HEADER_VERSION field. As version 1 has a fixed size of two u64 fields, checking the metadata size is enough to distinguish version 1 from version 2 (and from any later versions). For version 2, it reads out the CPU number and retrieves the metadata info for every CPU. Signed-off-by: Leo Yan Reviewed-by: James Clark Cc: Will Deacon Cc: Mike Leach Cc: linux-arm-kernel@lists.infradead.org Cc: Besar Wicaksono Cc: John Garry Link: https://lore.kernel.org/r/20241003184302.190806-5-leo.yan@arm.com Signed-off-by: Namhyung Kim Signed-off-by: Wei Chen --- tools/perf/util/arm-spe.c | 99 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 95 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 1c4cf1423470..9856f700d39b 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -79,6 +79,10 @@ struct arm_spe { unsigned long num_events; u8 use_ctx_pkt_for_pid; + + u64 **metadata; + u64 metadata_ver; + u64 metadata_nr_cpu; bool is_homogeneous; }; @@ -1087,6 +1091,73 @@ static int arm_spe_flush(struct perf_session *session __maybe_unused, return 0; } +static u64 *arm_spe__alloc_per_cpu_metadata(u64 *buf, int per_cpu_size) +{ + u64 *metadata; + + metadata = zalloc(per_cpu_size); + if (!metadata) + return NULL; + + memcpy(metadata, buf, per_cpu_size); + return metadata; +} + +static void arm_spe__free_metadata(u64 **metadata, int nr_cpu) +{ + int i; + + for (i = 0; i < nr_cpu; i++) + zfree(&metadata[i]); + free(metadata); +} + +static u64 **arm_spe__alloc_metadata(struct perf_record_auxtrace_info *info, + u64 *ver, int *nr_cpu) +{ + u64 *ptr = (u64 *)info->priv; + u64 metadata_size; + u64 **metadata = NULL; + int hdr_sz, per_cpu_sz, i; + + metadata_size = info->header.size - + sizeof(struct perf_record_auxtrace_info); + + /* Metadata version 1 */ + if (metadata_size == ARM_SPE_AUXTRACE_V1_PRIV_SIZE) { + *ver = 1; + *nr_cpu = 0; + /* No per CPU metadata */ + return NULL; + } + + *ver = ptr[ARM_SPE_HEADER_VERSION]; + hdr_sz = ptr[ARM_SPE_HEADER_SIZE]; + *nr_cpu = ptr[ARM_SPE_CPUS_NUM]; + + metadata = calloc(*nr_cpu, sizeof(*metadata)); + if (!metadata) + return NULL; + + 
/* Locate the start address of per CPU metadata */ + ptr += hdr_sz; + per_cpu_sz = (metadata_size - (hdr_sz * sizeof(u64))) / (*nr_cpu); + + for (i = 0; i < *nr_cpu; i++) { + metadata[i] = arm_spe__alloc_per_cpu_metadata(ptr, per_cpu_sz); + if (!metadata[i]) + goto err_per_cpu_metadata; + + ptr += per_cpu_sz / sizeof(u64); + } + + return metadata; + +err_per_cpu_metadata: + arm_spe__free_metadata(metadata, *nr_cpu); + return NULL; +} + static void arm_spe_free_queue(void *priv) { struct arm_spe_queue *speq = priv; @@ -1121,6 +1192,7 @@ static void arm_spe_free(struct perf_session *session) auxtrace_heap__free(&spe->heap); arm_spe_free_events(session); session->auxtrace = NULL; + arm_spe__free_metadata(spe->metadata, spe->metadata_nr_cpu); free(spe); } @@ -1389,15 +1461,26 @@ int arm_spe_process_auxtrace_info(union perf_event *event, size_t min_sz = ARM_SPE_AUXTRACE_V1_PRIV_SIZE; struct perf_record_time_conv *tc = &session->time_conv; struct arm_spe *spe; - int err; + u64 **metadata = NULL; + u64 metadata_ver; + int nr_cpu, err; if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + min_sz) return -EINVAL; + metadata = arm_spe__alloc_metadata(auxtrace_info, &metadata_ver, + &nr_cpu); + if (!metadata && metadata_ver != 1) { + pr_err("Failed to parse Arm SPE metadata.\n"); + return -EINVAL; + } + spe = zalloc(sizeof(struct arm_spe)); - if (!spe) - return -ENOMEM; + if (!spe) { + err = -ENOMEM; + goto err_free_metadata; + } err = auxtrace_queues__init(&spe->queues); if (err) @@ -1406,8 +1489,14 @@ int arm_spe_process_auxtrace_info(union perf_event *event, spe->session = session; spe->machine = &session->machines.host; /* No kvm support */ spe->auxtrace_type = auxtrace_info->type; - spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE]; + if (metadata_ver == 1) + spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE]; + else + spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2]; spe->is_homogeneous = arm_spe__is_homogeneous(metadata, nr_cpu); + spe->metadata = metadata; + spe->metadata_ver = metadata_ver; + spe->metadata_nr_cpu = nr_cpu; spe->timeless_decoding = arm_spe__is_timeless_decoding(spe); @@ -1468,5 +1557,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event, session->auxtrace = NULL; err_free: free(spe); +err_free_metadata: + arm_spe__free_metadata(metadata, nr_cpu); return err; } -- Gitee From f13e681f8d6827a065466b85c3dc0a5444a6d5b0 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 20 Feb 2024 08:02:03 +0530 Subject: [PATCH 03/75] arm64/sysreg: Add register fields for ID_AA64DFR1_EL1 ANBZ: #25252 commit fdd867fe9b32c30b290aa10097f89daff09625cc upstream. This adds register fields for ID_AA64DFR1_EL1 as per the definitions based on DDI0601 2023-12. 
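Illustrative sketch, not part of the patch itself: once the sysreg file above is regenerated, the new fields are consumed through the generated masks and enum values. The names below are assumptions based on the usual gen-sysreg.awk output, together with the SYS_FIELD_GET() helper synced later in this series:

static bool cpu_has_pmicntr(void)
{
	u64 dfr1 = read_sysreg_s(SYS_ID_AA64DFR1_EL1);

	/*
	 * PMICNTR occupies bits [39:36] of ID_AA64DFR1_EL1; IMP (0b0001)
	 * advertises the dedicated instruction counter.
	 */
	return SYS_FIELD_GET(ID_AA64DFR1_EL1, PMICNTR, dfr1) ==
	       ID_AA64DFR1_EL1_PMICNTR_IMP;
}
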
Cc: Will Deacon Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240220023203.3091229-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas Signed-off-by: Wei Chen --- arch/arm64/tools/sysreg | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 10e82b882529..663cef8a4794 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1170,7 +1170,36 @@ EndEnum EndSysreg Sysreg ID_AA64DFR1_EL1 3 0 0 5 1 -Res0 63:0 +Field 63:56 ABL_CMPs +UnsignedEnum 55:52 DPFZS + 0b0000 IGNR + 0b0001 FRZN +EndEnum +UnsignedEnum 51:48 EBEP + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 47:44 ITE + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 43:40 ABLE + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 39:36 PMICNTR + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 35:32 SPMU + 0b0000 NI + 0b0001 IMP + 0b0010 IMP_SPMZR +EndEnum +Field 31:24 CTX_CMPs +Field 23:16 WRPs +Field 15:8 BRPs +Field 7:0 SYSPMUID EndSysreg Sysreg ID_AA64AFR0_EL1 3 0 0 5 4 -- Gitee From c748f2e4c61eedb144beb1fce533a8d9c0096f6f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:38 +0000 Subject: [PATCH 04/75] arm64: sysreg: Add layout for ID_AA64MMFR4_EL1 ANBZ: #25252 commit cfc680bb04c54e61faa51a34d8383a0aa25b583f upstream. ARMv9.5 has introduced ID_AA64MMFR4_EL1 with a bunch of new features. Add the corresponding layout. This is extracted from the public ARM SysReg_xml_A_profile-2023-09 delivery, timestamped d55f5af8e09052abe92a02adf820deea2eaed717. Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Reviewed-by: Miguel Luis Link: https://lore.kernel.org/r/20240122181344.258974-5-maz@kernel.org Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/tools/sysreg | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 663cef8a4794..efd2b4bb7064 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1698,6 +1698,43 @@ UnsignedEnum 3:0 TCRX EndEnum EndSysreg +Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4 +Res0 63:40 +UnsignedEnum 39:36 E3DSE + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 35:28 +SignedEnum 27:24 E2H0 + 0b0000 IMP + 0b1110 NI_NV1 + 0b1111 NI +EndEnum +UnsignedEnum 23:20 NV_frac + 0b0000 NV_NV2 + 0b0001 NV2_ONLY +EndEnum +UnsignedEnum 19:16 FGWTE3 + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 15:12 HACDBS + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 11:8 ASID2 + 0b0000 NI + 0b0001 IMP +EndEnum +SignedEnum 7:4 EIESB + 0b0000 NI + 0b0001 ToEL3 + 0b0010 ToELx + 0b1111 ANY +EndEnum +Res0 3:0 +EndSysreg + Sysreg SCTLR_EL1 3 0 1 0 0 Field 63 TIDCP Field 62 SPINTMASK -- Gitee From bc8f2010bd77dc0566df1665f50f168abedd543c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 3 Feb 2025 17:27:23 +0000 Subject: [PATCH 05/75] arm64: sysreg: Add layout for HCR_EL2 ANBZ: #25252 commit d0f39259eff447fb4518777c36c7dffcf8b4ef9e upstream. Add HCR_EL2 to the sysreg file, more or less directly generated from the JSON file. Since the generated names significantly differ from the existing naming, express the old names in terms of the new one. One day, we'll fix this mess, but I'm not in any hurry. 
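Illustrative sketch, not part of the patch itself: the compatibility mapping amounts to a token-pasting helper, as the kvm_arm.h hunk carried by patch 08 in this series shows:

/* Map a legacy AArch32-inherited name onto its generated HCR_EL2_* twin. */
#define __HCR(x)	HCR_EL2_##x

#define HCR_TWI		__HCR(TWI)	/* expands to HCR_EL2_TWI */
#define HCR_VM		__HCR(VM)	/* expands to HCR_EL2_VM */
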
Reviewed-by: Joey Gouly Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/tools/sysreg | 68 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index efd2b4bb7064..ff537932942e 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2153,6 +2153,74 @@ Field 1 AFSR1_EL1 Field 0 AFSR0_EL1 EndSysregFields +Sysreg HCR_EL2 3 4 1 1 0 +Field 63:60 TWEDEL +Field 59 TWEDEn +Field 58 TID5 +Field 57 DCT +Field 56 ATA +Field 55 TTLBOS +Field 54 TTLBIS +Field 53 EnSCXT +Field 52 TOCU +Field 51 AMVOFFEN +Field 50 TICAB +Field 49 TID4 +Field 48 GPF +Field 47 FIEN +Field 46 FWB +Field 45 NV2 +Field 44 AT +Field 43 NV1 +Field 42 NV +Field 41 API +Field 40 APK +Field 39 TME +Field 38 MIOCNCE +Field 37 TEA +Field 36 TERR +Field 35 TLOR +Field 34 E2H +Field 33 ID +Field 32 CD +Field 31 RW +Field 30 TRVM +Field 29 HCD +Field 28 TDZ +Field 27 TGE +Field 26 TVM +Field 25 TTLB +Field 24 TPU +Field 23 TPCP +Field 22 TSW +Field 21 TACR +Field 20 TIDCP +Field 19 TSC +Field 18 TID3 +Field 17 TID2 +Field 16 TID1 +Field 15 TID0 +Field 14 TWE +Field 13 TWI +Field 12 DC +UnsignedEnum 11:10 BSU + 0b00 NONE + 0b01 IS + 0b10 OS + 0b11 FS +EndEnum +Field 9 FB +Field 8 VSE +Field 7 VI +Field 6 VF +Field 5 AMO +Field 4 IMO +Field 3 FMO +Field 2 PTW +Field 1 SWIO +Field 0 VM +EndSysreg + Sysreg HFGRTR_EL2 3 4 1 1 4 Fields HFGxTR_EL2 EndSysreg -- Gitee From 0b62d4f3c389a63e07dddd2e44bf70c8c64a9101 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 31 Oct 2024 08:42:11 +0000 Subject: [PATCH 06/75] arm64: sysreg: Add ID_AA64ISAR1_EL1.LS64 encoding for FEAT_LS64WB ANBZ: #25252 commit 2030396dac5f564ec85422ceb4327fcdb0054f83 upstream. The 2024 extensions are adding yet another variant of LS64 (aptly named FEAT_LS64WB) supporting LS64 accesses to write-back memory, as well as 32 byte single-copy atomic accesses using pairs of FP registers. Add the relevant encoding to ID_AA64ISAR1_EL1.LS64. Reviewed-by: Joey Gouly Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/tools/sysreg | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index ff537932942e..cbe182cc1fe6 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1292,6 +1292,7 @@ UnsignedEnum 63:60 LS64 0b0001 LS64 0b0010 LS64_V 0b0011 LS64_ACCDATA + 0b0100 LS64WB EndEnum UnsignedEnum 59:56 XS 0b0000 NI -- Gitee From fd86b60d7adae463e25a42c9078c8cd948c47712 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 10 Feb 2025 11:35:31 +0000 Subject: [PATCH 07/75] arm64: sysreg: Update ID_AA64MMFR4_EL1 description ANBZ: #25252 commit eef33835bf6f297faa222f48bf941d57d2f8bda0 upstream. Resync the ID_AA64MMFR4_EL1 with the architecture description. This results in: - the new PoPS field - the new NV2P1 value for the NV_frac field - the new RMEGDI field - the new SRMASK field These fields have been generated from the reference JSON file. 
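Illustrative sketch, not part of the patch itself: a resynced field is probed like any other ID register field. The ID_AA64MMFR4_EL1_PoPS_* names below are assumptions based on the usual generated output, together with the SYS_FIELD_GET() helper:

static bool cpu_has_pops(void)
{
	u64 mmfr4 = read_sysreg_s(SYS_ID_AA64MMFR4_EL1);

	/* PoPS sits in bits [3:0]; IMP (0b0001) advertises FEAT_PoPS. */
	return SYS_FIELD_GET(ID_AA64MMFR4_EL1, PoPS, mmfr4) ==
	       ID_AA64MMFR4_EL1_PoPS_IMP;
}
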
Reviewed-by: Joey Gouly Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/tools/sysreg | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index cbe182cc1fe6..b0c7e4d456ad 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1700,12 +1700,21 @@ EndEnum EndSysreg Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4 -Res0 63:40 +Res0 63:48 +UnsignedEnum 47:44 SRMASK + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 43:40 UnsignedEnum 39:36 E3DSE 0b0000 NI 0b0001 IMP EndEnum -Res0 35:28 +Res0 35:32 +UnsignedEnum 31:28 RMEGDI + 0b0000 NI + 0b0001 IMP +EndEnum SignedEnum 27:24 E2H0 0b0000 IMP 0b1110 NI_NV1 @@ -1714,6 +1723,7 @@ EndEnum UnsignedEnum 23:20 NV_frac 0b0000 NV_NV2 0b0001 NV2_ONLY + 0b0010 NV2P1 EndEnum UnsignedEnum 19:16 FGWTE3 0b0000 NI @@ -1733,7 +1743,10 @@ SignedEnum 7:4 EIESB 0b0010 ToELx 0b1111 ANY EndEnum -Res0 3:0 +UnsignedEnum 3:0 PoPS + 0b0000 NI + 0b0001 IMP +EndEnum EndSysreg Sysreg SCTLR_EL1 3 0 1 0 0 -- Gitee From 7668cb200561bc211e929cf10a6aaa7d5b860734 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 25 Oct 2024 18:23:38 +0000 Subject: [PATCH 08/75] arm64: sysreg: Migrate MDCR_EL2 definition to table ANBZ: #25252 commit 641630313e9c68b2a889b1aad684f29b9c3e4017 upstream. Migrate MDCR_EL2 over to the sysreg table and align definitions with DDI0601 2024-09. Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20241025182354.3364124-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton [ arm64: sysreg: keep HCR_RES0 in kvm_arm.h ] Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_arm.h | 153 +++++++++++++------------------ arch/arm64/tools/sysreg | 35 +++++++ 2 files changed, 99 insertions(+), 89 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 2d8b243a86cd..9cad835c6858 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -12,66 +12,70 @@ #include #include -/* Hyp Configuration Register (HCR) bits */ - -#define HCR_TID5 (UL(1) << 58) -#define HCR_DCT (UL(1) << 57) -#define HCR_ATA_SHIFT 56 -#define HCR_ATA (UL(1) << HCR_ATA_SHIFT) -#define HCR_TTLBOS (UL(1) << 55) -#define HCR_TTLBIS (UL(1) << 54) -#define HCR_ENSCXT (UL(1) << 53) -#define HCR_TOCU (UL(1) << 52) -#define HCR_AMVOFFEN (UL(1) << 51) -#define HCR_TICAB (UL(1) << 50) -#define HCR_TID4 (UL(1) << 49) -#define HCR_FIEN (UL(1) << 47) -#define HCR_FWB (UL(1) << 46) -#define HCR_NV2 (UL(1) << 45) -#define HCR_AT (UL(1) << 44) -#define HCR_NV1 (UL(1) << 43) -#define HCR_NV (UL(1) << 42) -#define HCR_API (UL(1) << 41) -#define HCR_APK (UL(1) << 40) -#define HCR_TEA (UL(1) << 37) -#define HCR_TERR (UL(1) << 36) -#define HCR_TLOR (UL(1) << 35) -#define HCR_E2H (UL(1) << 34) -#define HCR_ID (UL(1) << 33) -#define HCR_CD (UL(1) << 32) -#define HCR_RW_SHIFT 31 -#define HCR_RW (UL(1) << HCR_RW_SHIFT) -#define HCR_TRVM (UL(1) << 30) -#define HCR_HCD (UL(1) << 29) -#define HCR_TDZ (UL(1) << 28) -#define HCR_TGE (UL(1) << 27) -#define HCR_TVM (UL(1) << 26) -#define HCR_TTLB (UL(1) << 25) -#define HCR_TPU (UL(1) << 24) -#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */ -#define HCR_TSW (UL(1) << 22) -#define HCR_TACR (UL(1) << 21) -#define HCR_TIDCP (UL(1) << 20) -#define HCR_TSC (UL(1) << 19) -#define HCR_TID3 (UL(1) << 18) -#define HCR_TID2 (UL(1) << 17) -#define HCR_TID1 (UL(1) << 16) -#define HCR_TID0 (UL(1) << 15) -#define HCR_TWE (UL(1) << 14) -#define HCR_TWI (UL(1) << 13) -#define HCR_DC (UL(1) << 12) -#define HCR_BSU (3 
<< 10) -#define HCR_BSU_IS (UL(1) << 10) -#define HCR_FB (UL(1) << 9) -#define HCR_VSE (UL(1) << 8) -#define HCR_VI (UL(1) << 7) -#define HCR_VF (UL(1) << 6) -#define HCR_AMO (UL(1) << 5) -#define HCR_IMO (UL(1) << 4) -#define HCR_FMO (UL(1) << 3) -#define HCR_PTW (UL(1) << 2) -#define HCR_SWIO (UL(1) << 1) -#define HCR_VM (UL(1) << 0) +/* + * Because I'm terribly lazy and that repainting the whole of the KVM + * code with the proper names is a pain, use a helper to map the names + * inherited from AArch32 with the new fancy nomenclature. One day... + */ +#define __HCR(x) HCR_EL2_##x + +#define HCR_TID5 __HCR(TID5) +#define HCR_DCT __HCR(DCT) +#define HCR_ATA_SHIFT __HCR(ATA_SHIFT) +#define HCR_ATA __HCR(ATA) +#define HCR_TTLBOS __HCR(TTLBOS) +#define HCR_TTLBIS __HCR(TTLBIS) +#define HCR_ENSCXT __HCR(EnSCXT) +#define HCR_TOCU __HCR(TOCU) +#define HCR_AMVOFFEN __HCR(AMVOFFEN) +#define HCR_TICAB __HCR(TICAB) +#define HCR_TID4 __HCR(TID4) +#define HCR_FIEN __HCR(FIEN) +#define HCR_FWB __HCR(FWB) +#define HCR_NV2 __HCR(NV2) +#define HCR_AT __HCR(AT) +#define HCR_NV1 __HCR(NV1) +#define HCR_NV __HCR(NV) +#define HCR_API __HCR(API) +#define HCR_APK __HCR(APK) +#define HCR_TEA __HCR(TEA) +#define HCR_TERR __HCR(TERR) +#define HCR_TLOR __HCR(TLOR) +#define HCR_E2H __HCR(E2H) +#define HCR_ID __HCR(ID) +#define HCR_CD __HCR(CD) +#define HCR_RW __HCR(RW) +#define HCR_TRVM __HCR(TRVM) +#define HCR_HCD __HCR(HCD) +#define HCR_TDZ __HCR(TDZ) +#define HCR_TGE __HCR(TGE) +#define HCR_TVM __HCR(TVM) +#define HCR_TTLB __HCR(TTLB) +#define HCR_TPU __HCR(TPU) +#define HCR_TPC __HCR(TPCP) +#define HCR_TSW __HCR(TSW) +#define HCR_TACR __HCR(TACR) +#define HCR_TIDCP __HCR(TIDCP) +#define HCR_TSC __HCR(TSC) +#define HCR_TID3 __HCR(TID3) +#define HCR_TID2 __HCR(TID2) +#define HCR_TID1 __HCR(TID1) +#define HCR_TID0 __HCR(TID0) +#define HCR_TWE __HCR(TWE) +#define HCR_TWI __HCR(TWI) +#define HCR_DC __HCR(DC) +#define HCR_BSU __HCR(BSU) +#define HCR_BSU_IS __HCR(BSU_IS) +#define HCR_FB __HCR(FB) +#define HCR_VSE __HCR(VSE) +#define HCR_VI __HCR(VI) +#define HCR_VF __HCR(VF) +#define HCR_AMO __HCR(AMO) +#define HCR_IMO __HCR(IMO) +#define HCR_FMO __HCR(FMO) +#define HCR_PTW __HCR(PTW) +#define HCR_SWIO __HCR(SWIO) +#define HCR_VM __HCR(VM) #define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39)) /* @@ -304,35 +308,6 @@ GENMASK(19, 14) | \ BIT(11)) -/* Hyp Debug Configuration Register bits */ -#define MDCR_EL2_E2TB_MASK (UL(0x3)) -#define MDCR_EL2_E2TB_SHIFT (UL(24)) -#define MDCR_EL2_HPMFZS (UL(1) << 36) -#define MDCR_EL2_HPMFZO (UL(1) << 29) -#define MDCR_EL2_MTPME (UL(1) << 28) -#define MDCR_EL2_TDCC (UL(1) << 27) -#define MDCR_EL2_HLP (UL(1) << 26) -#define MDCR_EL2_HCCD (UL(1) << 23) -#define MDCR_EL2_TTRF (UL(1) << 19) -#define MDCR_EL2_HPMD (UL(1) << 17) -#define MDCR_EL2_TPMS (UL(1) << 14) -#define MDCR_EL2_E2PB_MASK (UL(0x3)) -#define MDCR_EL2_E2PB_SHIFT (UL(12)) -#define MDCR_EL2_TDRA (UL(1) << 11) -#define MDCR_EL2_TDOSA (UL(1) << 10) -#define MDCR_EL2_TDA (UL(1) << 9) -#define MDCR_EL2_TDE (UL(1) << 8) -#define MDCR_EL2_HPME (UL(1) << 7) -#define MDCR_EL2_TPM (UL(1) << 6) -#define MDCR_EL2_TPMCR (UL(1) << 5) -#define MDCR_EL2_HPMN_MASK (UL(0x1F)) -#define MDCR_EL2_RES0 (GENMASK(63, 37) | \ - GENMASK(35, 30) | \ - GENMASK(25, 24) | \ - GENMASK(22, 20) | \ - BIT(18) | \ - GENMASK(16, 15)) - /* * FGT register definitions * diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index b0c7e4d456ad..ae2a7486c5e5 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2235,6 +2235,41 @@ Field 1 
SWIO Field 0 VM EndSysreg +Sysreg MDCR_EL2 3 4 1 1 1 +Res0 63:51 +Field 50 EnSTEPOP +Res0 49:44 +Field 43 EBWE +Res0 42 +Field 41:40 PMEE +Res0 39:37 +Field 36 HPMFZS +Res0 35:32 +Field 31:30 PMSSE +Field 29 HPMFZO +Field 28 MTPME +Field 27 TDCC +Field 26 HLP +Field 25:24 E2TB +Field 23 HCCD +Res0 22:20 +Field 19 TTRF +Res0 18 +Field 17 HPMD +Res0 16 +Field 15 EnSPM +Field 14 TPMS +Field 13:12 E2PB +Field 11 TDRA +Field 10 TDOSA +Field 9 TDA +Field 8 TDE +Field 7 HPME +Field 6 TPM +Field 5 TPMCR +Field 4:0 HPMN +EndSysreg + Sysreg HFGRTR_EL2 3 4 1 1 4 Fields HFGxTR_EL2 EndSysreg -- Gitee From 4317c42bb0be1dbf7792f05b7405189d04ed213b Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 11 Oct 2023 19:57:39 +0000 Subject: [PATCH 09/75] tools headers arm64: Update sysreg.h with kernel sources ANBZ: #25252 commit 0359c946b13153bd57fac65f4f3600ba5673e3de upstream. The users of sysreg.h (perf, KVM selftests) are now generating the necessary sysreg-defs.h; sync sysreg.h with the kernel sources and fix the KVM selftests that use macros which suffered a rename. Signed-off-by: Jing Zhang Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-5-oliver.upton@linux.dev Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- tools/arch/arm64/include/asm/gpr-num.h | 26 + tools/arch/arm64/include/asm/sysreg.h | 839 ++++-------------- .../selftests/kvm/aarch64/aarch32_id_regs.c | 4 +- .../selftests/kvm/aarch64/debug-exceptions.c | 12 +- .../selftests/kvm/aarch64/page_fault_test.c | 6 +- .../selftests/kvm/lib/aarch64/processor.c | 6 +- 6 files changed, 232 insertions(+), 661 deletions(-) create mode 100644 tools/arch/arm64/include/asm/gpr-num.h diff --git a/tools/arch/arm64/include/asm/gpr-num.h b/tools/arch/arm64/include/asm/gpr-num.h new file mode 100644 index 000000000000..05da4a7c5788 --- /dev/null +++ b/tools/arch/arm64/include/asm/gpr-num.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GPR_NUM_H +#define __ASM_GPR_NUM_H + +#ifdef __ASSEMBLY__ + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + .equ .L__gpr_num_x\num, \num + .equ .L__gpr_num_w\num, \num + .endr + .equ .L__gpr_num_xzr, 31 + .equ .L__gpr_num_wzr, 31 + +#else /* __ASSEMBLY__ */ + +#define __DEFINE_ASM_GPR_NUMS \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__gpr_num_x\\num, \\num\n" \ +" .equ .L__gpr_num_w\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__gpr_num_xzr, 31\n" \ +" .equ .L__gpr_num_wzr, 31\n" + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GPR_NUM_H */ diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index 7640fa27be94..ccc13e991376 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -12,6 +12,8 @@ #include #include +#include + /* * ARMv8 ARM reserves the following encoding for system registers: * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", @@ -87,20 +89,24 @@ */ #define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift) #define PSTATE_Imm_shift CRm_shift +#define SET_PSTATE(x, r) __emit_inst(0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift)) #define PSTATE_PAN pstate_field(0, 4) #define PSTATE_UAO pstate_field(0, 3) #define PSTATE_SSBS pstate_field(3, 1) +#define PSTATE_DIT pstate_field(3, 2) #define PSTATE_TCO pstate_field(3, 4) -#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift)) -#define 
SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) -#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) -#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_PAN(x) SET_PSTATE((x), PAN) +#define SET_PSTATE_UAO(x) SET_PSTATE((x), UAO) +#define SET_PSTATE_SSBS(x) SET_PSTATE((x), SSBS) +#define SET_PSTATE_DIT(x) SET_PSTATE((x), DIT) +#define SET_PSTATE_TCO(x) SET_PSTATE((x), TCO) #define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x)) #define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x)) #define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) +#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x)) #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -108,25 +114,43 @@ #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) +#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) +#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) +#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) +#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) + +/* + * Automatically generated definitions for system registers, the + * manual encodings below are in the process of being converted to + * come from here. The header relies on the definition of sys_reg() + * earlier in this file. + */ +#include "asm/sysreg-defs.h" /* * System registers, organised loosely by encoding but grouped together * where the architected name contains an index. e.g. ID_MMFR_EL1. */ -#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) -#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) -#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) -#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2) -#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2) +#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3) +#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3) +#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3) + #define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4) #define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5) #define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) #define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) #define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) -#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) + #define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) +#define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0)) +#define OSLSR_EL1_OSLM_NI 0 +#define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3) +#define OSLSR_EL1_OSLK BIT(1) + #define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) #define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) #define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6) @@ -142,59 +166,12 @@ #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) -#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0) -#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1) -#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4) -#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2) -#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5) -#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3) -#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4) -#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5) -#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6) -#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7) -#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) -#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6) - -#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0) 
-#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1) -#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2) -#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3) -#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) -#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) -#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) - -#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) -#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) -#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) - -#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) -#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) -#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) - -#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) -#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) - -#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) -#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) - -#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) -#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) - -#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) -#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) -#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) - -#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) -#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) -#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) -#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -230,159 +207,33 @@ #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) -#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) #define SYS_PAR_EL1_FST GENMASK(6, 1) /*** Statistical Profiling Extension ***/ -/* ID registers */ -#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define SYS_PMSIDR_EL1_FE_SHIFT 0 -#define SYS_PMSIDR_EL1_FT_SHIFT 1 -#define SYS_PMSIDR_EL1_FL_SHIFT 2 -#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 -#define SYS_PMSIDR_EL1_LDS_SHIFT 4 -#define SYS_PMSIDR_EL1_ERND_SHIFT 5 -#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL -#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL -#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL - -#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 -#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU -#define SYS_PMBIDR_EL1_P_SHIFT 4 -#define SYS_PMBIDR_EL1_F_SHIFT 5 - -/* Sampling controls */ -#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 -#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 -#define SYS_PMSCR_EL1_CX_SHIFT 3 -#define SYS_PMSCR_EL1_PA_SHIFT 4 -#define SYS_PMSCR_EL1_TS_SHIFT 5 -#define SYS_PMSCR_EL1_PCT_SHIFT 6 - -#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 -#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 -#define SYS_PMSCR_EL2_CX_SHIFT 3 -#define SYS_PMSCR_EL2_PA_SHIFT 4 -#define SYS_PMSCR_EL2_TS_SHIFT 5 -#define SYS_PMSCR_EL2_PCT_SHIFT 6 - -#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) - -#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define SYS_PMSIRR_EL1_RND_SHIFT 0 -#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL - -/* Filtering controls */ -#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) - -#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define SYS_PMSFCR_EL1_FE_SHIFT 0 
-#define SYS_PMSFCR_EL1_FT_SHIFT 1 -#define SYS_PMSFCR_EL1_FL_SHIFT 2 -#define SYS_PMSFCR_EL1_B_SHIFT 16 -#define SYS_PMSFCR_EL1_LD_SHIFT 17 -#define SYS_PMSFCR_EL1_ST_SHIFT 18 - -#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) -#define SYS_PMSEVFR_EL1_RES0_8_2 \ +#define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) -#define SYS_PMSEVFR_EL1_RES0_8_3 \ - (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) - -#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 - -/* Buffer controls */ -#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define SYS_PMBLIMITR_EL1_E_SHIFT 0 -#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 -#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL -#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) - -#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) +#define PMSEVFR_EL1_RES0_V1P1 \ + (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P2 \ + (PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6)) /* Buffer error reporting */ -#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define SYS_PMBSR_EL1_COLL_SHIFT 16 -#define SYS_PMBSR_EL1_S_SHIFT 17 -#define SYS_PMBSR_EL1_EA_SHIFT 18 -#define SYS_PMBSR_EL1_DL_SHIFT 19 -#define SYS_PMBSR_EL1_EC_SHIFT 26 -#define SYS_PMBSR_EL1_EC_MASK 0x3fUL - -#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) - -#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL +#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK -#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 -#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK -#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) +#define PMBSR_EL1_BUF_BSC_FULL 0x1UL /*** End of Statistical Profiling Extension ***/ -/* - * TRBE Registers - */ -#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0) -#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1) -#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2) -#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3) -#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4) -#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6) -#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7) - -#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0) -#define TRBLIMITR_LIMIT_SHIFT 12 -#define TRBLIMITR_NVM BIT(5) -#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0) -#define TRBLIMITR_TRIG_MODE_SHIFT 3 -#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0) -#define TRBLIMITR_FILL_MODE_SHIFT 1 -#define TRBLIMITR_ENABLE BIT(0) -#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0) -#define TRBPTR_PTR_SHIFT 0 -#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0) -#define TRBBASER_BASE_SHIFT 12 -#define TRBSR_EC_MASK GENMASK(5, 0) -#define TRBSR_EC_SHIFT 26 -#define TRBSR_IRQ BIT(22) -#define TRBSR_TRG BIT(21) -#define TRBSR_WRAP BIT(20) -#define TRBSR_ABORT BIT(18) -#define TRBSR_STOP BIT(17) -#define TRBSR_MSS_MASK GENMASK(15, 0) -#define TRBSR_MSS_SHIFT 0 -#define TRBSR_BSC_MASK GENMASK(5, 0) -#define TRBSR_BSC_SHIFT 0 -#define TRBSR_FSC_MASK GENMASK(5, 0) -#define TRBSR_FSC_SHIFT 0 -#define TRBMAR_SHARE_MASK GENMASK(1, 0) -#define TRBMAR_SHARE_SHIFT 8 -#define TRBMAR_OUTER_MASK GENMASK(3, 0) -#define TRBMAR_OUTER_SHIFT 4 -#define TRBMAR_INNER_MASK 
GENMASK(3, 0) -#define TRBMAR_INNER_SHIFT 0 -#define TRBTRG_TRG_MASK GENMASK(31, 0) -#define TRBTRG_TRG_SHIFT 0 -#define TRBIDR_FLAG BIT(5) -#define TRBIDR_PROG BIT(4) -#define TRBIDR_ALIGN_MASK GENMASK(3, 0) -#define TRBIDR_ALIGN_SHIFT 0 +#define TRBSR_EL1_BSC_MASK GENMASK(5, 0) +#define TRBSR_EL1_BSC_SHIFT 0 #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) @@ -392,12 +243,6 @@ #define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) -#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0) -#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1) -#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2) -#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3) -#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7) - #define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) #define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1) @@ -429,23 +274,10 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) -#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1) -#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4) - -#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) - #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) -#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) -#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) -#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) -#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) - -#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) -#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) - #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) @@ -465,6 +297,7 @@ #define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) #define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) +#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5) #define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7) @@ -506,6 +339,10 @@ #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) +#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1) +#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5) +#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6) + #define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) @@ -515,7 +352,9 @@ #define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0) #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) +#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0) #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) @@ -525,26 +364,48 @@ #define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) +#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0) +#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5) + #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) -#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4) -#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) -#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) -#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) +#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1) +#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0) +#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1) +#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2) +#define SYS_HSTR_EL2 sys_reg(3, 4, 1, 1, 3) +#define SYS_HACR_EL2 sys_reg(3, 4, 1, 1, 7) + +#define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0) +#define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1) +#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2) +#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) +#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) + #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) -#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 
sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) +#define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) +#define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0) +#define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1) #define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) #define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) #define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0) + #define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) +#define SYS_HPFAR_EL2 sys_reg(3, 4, 6, 0, 4) + +#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) +#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) -#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) +#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) +#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) +#define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) +#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) #define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) #define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0) #define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1) @@ -586,10 +447,14 @@ #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) +#define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1) +#define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2) + +#define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3) +#define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) + /* VHE encodings for architectural EL0/1 system registers */ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) -#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) -#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) @@ -599,11 +464,9 @@ #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) -#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) -#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) @@ -612,37 +475,41 @@ #define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1) #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) +#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0) + /* Common SCTLR_ELx flags. 
*/ +#define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) -#define SCTLR_ELx_TCF_SHIFT 40 -#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) - +#define SCTLR_ELx_EE_SHIFT 25 #define SCTLR_ELx_ENIA_SHIFT 31 -#define SCTLR_ELx_ITFSB (BIT(37)) -#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) -#define SCTLR_ELx_ENIB (BIT(30)) -#define SCTLR_ELx_ENDA (BIT(27)) -#define SCTLR_ELx_EE (BIT(25)) -#define SCTLR_ELx_IESB (BIT(21)) -#define SCTLR_ELx_WXN (BIT(19)) -#define SCTLR_ELx_ENDB (BIT(13)) -#define SCTLR_ELx_I (BIT(12)) -#define SCTLR_ELx_SA (BIT(3)) -#define SCTLR_ELx_C (BIT(2)) -#define SCTLR_ELx_A (BIT(1)) -#define SCTLR_ELx_M (BIT(0)) +#define SCTLR_ELx_ITFSB (BIT(37)) +#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) +#define SCTLR_ELx_ENIB (BIT(30)) +#define SCTLR_ELx_LSMAOE (BIT(29)) +#define SCTLR_ELx_nTLSMD (BIT(28)) +#define SCTLR_ELx_ENDA (BIT(27)) +#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT)) +#define SCTLR_ELx_EIS (BIT(22)) +#define SCTLR_ELx_IESB (BIT(21)) +#define SCTLR_ELx_TSCXT (BIT(20)) +#define SCTLR_ELx_WXN (BIT(19)) +#define SCTLR_ELx_ENDB (BIT(13)) +#define SCTLR_ELx_I (BIT(12)) +#define SCTLR_ELx_EOS (BIT(11)) +#define SCTLR_ELx_SA (BIT(3)) +#define SCTLR_ELx_C (BIT(2)) +#define SCTLR_ELx_A (BIT(1)) +#define SCTLR_ELx_M (BIT(0)) /* SCTLR_EL2 specific flags. */ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ (BIT(29))) +#define SCTLR_EL2_BT (BIT(36)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE #else @@ -658,33 +525,6 @@ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) /* SCTLR_EL1 specific flags. 
*/ -#define SCTLR_EL1_EPAN (BIT(57)) -#define SCTLR_EL1_ATA0 (BIT(42)) - -#define SCTLR_EL1_TCF0_SHIFT 38 -#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) - -#define SCTLR_EL1_BT1 (BIT(36)) -#define SCTLR_EL1_BT0 (BIT(35)) -#define SCTLR_EL1_UCI (BIT(26)) -#define SCTLR_EL1_E0E (BIT(24)) -#define SCTLR_EL1_SPAN (BIT(23)) -#define SCTLR_EL1_NTWE (BIT(18)) -#define SCTLR_EL1_NTWI (BIT(16)) -#define SCTLR_EL1_UCT (BIT(15)) -#define SCTLR_EL1_DZE (BIT(14)) -#define SCTLR_EL1_UMA (BIT(9)) -#define SCTLR_EL1_SED (BIT(8)) -#define SCTLR_EL1_ITD (BIT(7)) -#define SCTLR_EL1_CP15BEN (BIT(5)) -#define SCTLR_EL1_SA0 (BIT(4)) - -#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ - (BIT(29))) - #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #else @@ -692,14 +532,17 @@ #endif #define INIT_SCTLR_EL1_MMU_OFF \ - (ENDIAN_SET_EL1 | SCTLR_EL1_RES1) + (ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \ + SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) #define INIT_SCTLR_EL1_MMU_ON \ - (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ - SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ - SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ - SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \ - SCTLR_EL1_EPAN | SCTLR_EL1_RES1) + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \ + SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \ + SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \ + SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \ + SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \ + SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) @@ -712,387 +555,68 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64isar0 */ -#define ID_AA64ISAR0_RNDR_SHIFT 60 -#define ID_AA64ISAR0_TLB_SHIFT 56 -#define ID_AA64ISAR0_TS_SHIFT 52 -#define ID_AA64ISAR0_FHM_SHIFT 48 -#define ID_AA64ISAR0_DP_SHIFT 44 -#define ID_AA64ISAR0_SM4_SHIFT 40 -#define ID_AA64ISAR0_SM3_SHIFT 36 -#define ID_AA64ISAR0_SHA3_SHIFT 32 -#define ID_AA64ISAR0_RDM_SHIFT 28 -#define ID_AA64ISAR0_ATOMICS_SHIFT 20 -#define ID_AA64ISAR0_CRC32_SHIFT 16 -#define ID_AA64ISAR0_SHA2_SHIFT 12 -#define ID_AA64ISAR0_SHA1_SHIFT 8 -#define ID_AA64ISAR0_AES_SHIFT 4 - -#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 -#define ID_AA64ISAR0_TLB_RANGE 0x2 - -/* id_aa64isar1 */ -#define ID_AA64ISAR1_I8MM_SHIFT 52 -#define ID_AA64ISAR1_DGH_SHIFT 48 -#define ID_AA64ISAR1_BF16_SHIFT 44 -#define ID_AA64ISAR1_SPECRES_SHIFT 40 -#define ID_AA64ISAR1_SB_SHIFT 36 -#define ID_AA64ISAR1_FRINTTS_SHIFT 32 -#define ID_AA64ISAR1_GPI_SHIFT 28 -#define ID_AA64ISAR1_GPA_SHIFT 24 -#define ID_AA64ISAR1_LRCPC_SHIFT 20 -#define ID_AA64ISAR1_FCMA_SHIFT 16 -#define ID_AA64ISAR1_JSCVT_SHIFT 12 -#define ID_AA64ISAR1_API_SHIFT 8 -#define ID_AA64ISAR1_APA_SHIFT 4 -#define ID_AA64ISAR1_DPB_SHIFT 0 - -#define ID_AA64ISAR1_APA_NI 0x0 -#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 -#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_API_NI 0x0 -#define 
ID_AA64ISAR1_API_IMP_DEF 0x1 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_GPA_NI 0x0 -#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_GPI_NI 0x0 -#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 - /* id_aa64pfr0 */ -#define ID_AA64PFR0_CSV3_SHIFT 60 -#define ID_AA64PFR0_CSV2_SHIFT 56 -#define ID_AA64PFR0_DIT_SHIFT 48 -#define ID_AA64PFR0_AMU_SHIFT 44 -#define ID_AA64PFR0_MPAM_SHIFT 40 -#define ID_AA64PFR0_SEL2_SHIFT 36 -#define ID_AA64PFR0_SVE_SHIFT 32 -#define ID_AA64PFR0_RAS_SHIFT 28 -#define ID_AA64PFR0_GIC_SHIFT 24 -#define ID_AA64PFR0_ASIMD_SHIFT 20 -#define ID_AA64PFR0_FP_SHIFT 16 -#define ID_AA64PFR0_EL3_SHIFT 12 -#define ID_AA64PFR0_EL2_SHIFT 8 -#define ID_AA64PFR0_EL1_SHIFT 4 -#define ID_AA64PFR0_EL0_SHIFT 0 - -#define ID_AA64PFR0_AMU 0x1 -#define ID_AA64PFR0_SVE 0x1 -#define ID_AA64PFR0_RAS_V1 0x1 -#define ID_AA64PFR0_RAS_V1P1 0x2 -#define ID_AA64PFR0_FP_NI 0xf -#define ID_AA64PFR0_FP_SUPPORTED 0x0 -#define ID_AA64PFR0_ASIMD_NI 0xf -#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 -#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 - -/* id_aa64pfr1 */ -#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_RASFRAC_SHIFT 12 -#define ID_AA64PFR1_MTE_SHIFT 8 -#define ID_AA64PFR1_SSBS_SHIFT 4 -#define ID_AA64PFR1_BT_SHIFT 0 - -#define ID_AA64PFR1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_BT_BTI 0x1 - -#define ID_AA64PFR1_MTE_NI 0x0 -#define ID_AA64PFR1_MTE_EL0 0x1 -#define ID_AA64PFR1_MTE 0x2 - -/* id_aa64zfr0 */ -#define ID_AA64ZFR0_F64MM_SHIFT 56 -#define ID_AA64ZFR0_F32MM_SHIFT 52 -#define ID_AA64ZFR0_I8MM_SHIFT 44 -#define ID_AA64ZFR0_SM4_SHIFT 40 -#define ID_AA64ZFR0_SHA3_SHIFT 32 -#define ID_AA64ZFR0_BF16_SHIFT 20 -#define ID_AA64ZFR0_BITPERM_SHIFT 16 -#define ID_AA64ZFR0_AES_SHIFT 4 -#define ID_AA64ZFR0_SVEVER_SHIFT 0 - -#define ID_AA64ZFR0_F64MM 0x1 -#define ID_AA64ZFR0_F32MM 0x1 -#define ID_AA64ZFR0_I8MM 0x1 -#define ID_AA64ZFR0_BF16 0x1 -#define ID_AA64ZFR0_SM4 0x1 -#define ID_AA64ZFR0_SHA3 0x1 -#define ID_AA64ZFR0_BITPERM 0x1 -#define ID_AA64ZFR0_AES 0x1 -#define ID_AA64ZFR0_AES_PMULL 0x2 -#define ID_AA64ZFR0_SVEVER_SVE2 0x1 +#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64mmfr0 */ -#define ID_AA64MMFR0_ECV_SHIFT 60 -#define ID_AA64MMFR0_FGT_SHIFT 56 -#define ID_AA64MMFR0_EXS_SHIFT 44 -#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 -#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 -#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 -#define ID_AA64MMFR0_TGRAN4_SHIFT 28 -#define ID_AA64MMFR0_TGRAN64_SHIFT 24 -#define ID_AA64MMFR0_TGRAN16_SHIFT 20 -#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 -#define ID_AA64MMFR0_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 -#define ID_AA64MMFR0_ASID_SHIFT 4 -#define ID_AA64MMFR0_PARANGE_SHIFT 0 - -#define ID_AA64MMFR0_ASID_8 0x0 -#define ID_AA64MMFR0_ASID_16 0x2 - -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf - -#define ID_AA64MMFR0_PARANGE_32 0x0 -#define ID_AA64MMFR0_PARANGE_36 0x1 -#define 
ID_AA64MMFR0_PARANGE_40 0x2 -#define ID_AA64MMFR0_PARANGE_42 0x3 -#define ID_AA64MMFR0_PARANGE_44 0x4 -#define ID_AA64MMFR0_PARANGE_48 0x5 -#define ID_AA64MMFR0_PARANGE_52 0x6 +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf #define ARM64_MIN_PARANGE_BITS 32 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 #ifdef CONFIG_ARM64_PA_BITS_52 -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52 #else -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif -/* id_aa64mmfr1 */ -#define ID_AA64MMFR1_ETS_SHIFT 36 -#define ID_AA64MMFR1_TWED_SHIFT 32 -#define ID_AA64MMFR1_XNX_SHIFT 28 -#define ID_AA64MMFR1_SPECSEI_SHIFT 24 -#define ID_AA64MMFR1_PAN_SHIFT 20 -#define ID_AA64MMFR1_LOR_SHIFT 16 -#define ID_AA64MMFR1_HPD_SHIFT 12 -#define ID_AA64MMFR1_VHE_SHIFT 8 -#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 -#define ID_AA64MMFR1_HADBS_SHIFT 0 - -#define ID_AA64MMFR1_VMIDBITS_8 0 -#define ID_AA64MMFR1_VMIDBITS_16 2 - -/* id_aa64mmfr2 */ -#define ID_AA64MMFR2_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EVT_SHIFT 56 -#define ID_AA64MMFR2_BBM_SHIFT 52 -#define ID_AA64MMFR2_TTL_SHIFT 48 -#define ID_AA64MMFR2_FWB_SHIFT 40 -#define ID_AA64MMFR2_IDS_SHIFT 36 -#define ID_AA64MMFR2_AT_SHIFT 32 -#define ID_AA64MMFR2_ST_SHIFT 28 -#define ID_AA64MMFR2_NV_SHIFT 24 -#define ID_AA64MMFR2_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_LVA_SHIFT 16 -#define ID_AA64MMFR2_IESB_SHIFT 12 -#define ID_AA64MMFR2_LSM_SHIFT 8 -#define ID_AA64MMFR2_UAO_SHIFT 4 -#define ID_AA64MMFR2_CNP_SHIFT 0 - -/* id_aa64dfr0 */ -#define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TRBE_SHIFT 44 -#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 -#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 -#define ID_AA64DFR0_PMSVER_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 -#define ID_AA64DFR0_WRPS_SHIFT 20 -#define ID_AA64DFR0_BRPS_SHIFT 12 -#define ID_AA64DFR0_PMUVER_SHIFT 8 -#define ID_AA64DFR0_TRACEVER_SHIFT 4 -#define ID_AA64DFR0_DEBUGVER_SHIFT 0 - -#define ID_AA64DFR0_PMUVER_8_0 0x1 -#define ID_AA64DFR0_PMUVER_8_1 0x4 -#define ID_AA64DFR0_PMUVER_8_4 0x5 -#define ID_AA64DFR0_PMUVER_8_5 0x6 -#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVER_8_2 0x1 -#define ID_AA64DFR0_PMSVER_8_3 0x2 - -#define ID_DFR0_PERFMON_SHIFT 24 - -#define ID_DFR0_PERFMON_8_0 0x3 -#define ID_DFR0_PERFMON_8_1 0x4 -#define ID_DFR0_PERFMON_8_4 0x5 -#define ID_DFR0_PERFMON_8_5 0x6 - -#define ID_ISAR4_SWP_FRAC_SHIFT 28 -#define ID_ISAR4_PSR_M_SHIFT 24 -#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20 -#define ID_ISAR4_BARRIER_SHIFT 16 -#define ID_ISAR4_SMC_SHIFT 12 -#define ID_ISAR4_WRITEBACK_SHIFT 8 -#define ID_ISAR4_WITHSHIFTS_SHIFT 4 -#define ID_ISAR4_UNPRIV_SHIFT 0 - -#define ID_DFR1_MTPMU_SHIFT 0 - -#define ID_ISAR0_DIVIDE_SHIFT 24 -#define ID_ISAR0_DEBUG_SHIFT 20 -#define ID_ISAR0_COPROC_SHIFT 16 -#define ID_ISAR0_CMPBRANCH_SHIFT 12 -#define 
ID_ISAR0_BITFIELD_SHIFT 8 -#define ID_ISAR0_BITCOUNT_SHIFT 4 -#define ID_ISAR0_SWAP_SHIFT 0 - -#define ID_ISAR5_RDM_SHIFT 24 -#define ID_ISAR5_CRC32_SHIFT 16 -#define ID_ISAR5_SHA2_SHIFT 12 -#define ID_ISAR5_SHA1_SHIFT 8 -#define ID_ISAR5_AES_SHIFT 4 -#define ID_ISAR5_SEVL_SHIFT 0 - -#define ID_ISAR6_I8MM_SHIFT 24 -#define ID_ISAR6_BF16_SHIFT 20 -#define ID_ISAR6_SPECRES_SHIFT 16 -#define ID_ISAR6_SB_SHIFT 12 -#define ID_ISAR6_FHM_SHIFT 8 -#define ID_ISAR6_DP_SHIFT 4 -#define ID_ISAR6_JSCVT_SHIFT 0 - -#define ID_MMFR0_INNERSHR_SHIFT 28 -#define ID_MMFR0_FCSE_SHIFT 24 -#define ID_MMFR0_AUXREG_SHIFT 20 -#define ID_MMFR0_TCM_SHIFT 16 -#define ID_MMFR0_SHARELVL_SHIFT 12 -#define ID_MMFR0_OUTERSHR_SHIFT 8 -#define ID_MMFR0_PMSA_SHIFT 4 -#define ID_MMFR0_VMSA_SHIFT 0 - -#define ID_MMFR4_EVT_SHIFT 28 -#define ID_MMFR4_CCIDX_SHIFT 24 -#define ID_MMFR4_LSM_SHIFT 20 -#define ID_MMFR4_HPDS_SHIFT 16 -#define ID_MMFR4_CNP_SHIFT 12 -#define ID_MMFR4_XNX_SHIFT 8 -#define ID_MMFR4_AC2_SHIFT 4 -#define ID_MMFR4_SPECSEI_SHIFT 0 - -#define ID_MMFR5_ETS_SHIFT 0 - -#define ID_PFR0_DIT_SHIFT 24 -#define ID_PFR0_CSV2_SHIFT 16 -#define ID_PFR0_STATE3_SHIFT 12 -#define ID_PFR0_STATE2_SHIFT 8 -#define ID_PFR0_STATE1_SHIFT 4 -#define ID_PFR0_STATE0_SHIFT 0 - -#define ID_DFR0_PERFMON_SHIFT 24 -#define ID_DFR0_MPROFDBG_SHIFT 20 -#define ID_DFR0_MMAPTRC_SHIFT 16 -#define ID_DFR0_COPTRC_SHIFT 12 -#define ID_DFR0_MMAPDBG_SHIFT 8 -#define ID_DFR0_COPSDBG_SHIFT 4 -#define ID_DFR0_COPDBG_SHIFT 0 - -#define ID_PFR2_SSBS_SHIFT 4 -#define ID_PFR2_CSV3_SHIFT 0 - -#define MVFR0_FPROUND_SHIFT 28 -#define MVFR0_FPSHVEC_SHIFT 24 -#define MVFR0_FPSQRT_SHIFT 20 -#define MVFR0_FPDIVIDE_SHIFT 16 -#define MVFR0_FPTRAP_SHIFT 12 -#define MVFR0_FPDP_SHIFT 8 -#define MVFR0_FPSP_SHIFT 4 -#define MVFR0_SIMD_SHIFT 0 - -#define MVFR1_SIMDFMAC_SHIFT 28 -#define MVFR1_FPHP_SHIFT 24 -#define MVFR1_SIMDHP_SHIFT 20 -#define MVFR1_SIMDSP_SHIFT 16 -#define MVFR1_SIMDINT_SHIFT 12 -#define MVFR1_SIMDLS_SHIFT 8 -#define MVFR1_FPDNAN_SHIFT 4 -#define MVFR1_FPFTZ_SHIFT 0 - -#define ID_PFR1_GIC_SHIFT 28 -#define ID_PFR1_VIRT_FRAC_SHIFT 24 -#define ID_PFR1_SEC_FRAC_SHIFT 20 -#define ID_PFR1_GENTIMER_SHIFT 16 -#define ID_PFR1_VIRTUALIZATION_SHIFT 12 -#define ID_PFR1_MPROGMOD_SHIFT 8 -#define ID_PFR1_SECURITY_SHIFT 4 -#define ID_PFR1_PROGMOD_SHIFT 0 - #if defined(CONFIG_ARM64_4K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT 
ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT #elif defined(CONFIG_ARM64_64K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT #endif -#define MVFR2_FPMISC_SHIFT 4 -#define MVFR2_SIMDMISC_SHIFT 0 - -#define DCZID_DZP_SHIFT 4 -#define DCZID_BS_SHIFT 0 +#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ +#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ -/* - * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which - * are reserved by the SVE architecture for future expansion of the LEN - * field, with compatible semantics. - */ -#define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_SIZE 9 -#define ZCR_ELx_LEN_MASK 0x1ff +#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */ +#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */ #define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ -#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) - -/* TCR EL1 Bit Definitions */ -#define SYS_TCR_EL1_TCMA1 (BIT(58)) -#define SYS_TCR_EL1_TCMA0 (BIT(57)) /* GCR_EL1 Definitions */ #define SYS_GCR_EL1_RRND (BIT(16)) #define SYS_GCR_EL1_EXCL_MASK 0xffffUL +#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL) + /* RGSR_EL1 Definitions */ #define SYS_RGSR_EL1_TAG_MASK 0xfUL #define SYS_RGSR_EL1_SEED_SHIFT 8 #define SYS_RGSR_EL1_SEED_MASK 0xffffUL -/* GMID_EL1 field definitions */ -#define SYS_GMID_EL1_BS_SHIFT 0 -#define SYS_GMID_EL1_BS_SIZE 4 - /* TFSR{,E0}_EL1 bit definitions */ #define SYS_TFSR_EL1_TF0_SHIFT 0 #define SYS_TFSR_EL1_TF1_SHIFT 1 @@ -1103,6 +627,7 @@ #define SYS_MPIDR_SAFE_VAL (BIT(31)) #define TRFCR_ELx_TS_SHIFT 5 +#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) @@ -1110,7 +635,6 @@ #define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) @@ -1137,6 +661,7 @@ #define ICH_HCR_TC (1 << 10) #define ICH_HCR_TALL0 (1 << 11) #define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_TDIR (1 << 14) #define ICH_HCR_EOIcount_SHIFT 27 #define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) @@ -1169,49 +694,60 @@ #define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) #define ICH_VTR_A3V_SHIFT 21 #define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) +#define ICH_VTR_TDS_SHIFT 19 +#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT) + +/* + * Permission Indirection Extension (PIE) permission encodings. + * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension). 
+ */ +#define PIE_NONE_O 0x0 +#define PIE_R_O 0x1 +#define PIE_X_O 0x2 +#define PIE_RX_O 0x3 +#define PIE_RW_O 0x5 +#define PIE_RWnX_O 0x6 +#define PIE_RWX_O 0x7 +#define PIE_R 0x8 +#define PIE_GCS 0x9 +#define PIE_RX 0xa +#define PIE_RW 0xc +#define PIE_RWX 0xe + +#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4)) #define ARM64_FEATURE_FIELD_BITS 4 -/* Create a mask for the feature bits of the specified feature. */ -#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT)) +/* Defined for compatibility only, do not add new users. */ +#define ARM64_FEATURE_MASK(x) (x##_MASK) #ifdef __ASSEMBLY__ - .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 - .equ .L__reg_num_x\num, \num - .endr - .equ .L__reg_num_xzr, 31 - .macro mrs_s, rt, sreg - __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt)) + __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt)) .endm .macro msr_s, sreg, rt - __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt)) + __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt)) .endm #else +#include #include #include #include -#define __DEFINE_MRS_MSR_S_REGNUM \ -" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ -" .equ .L__reg_num_x\\num, \\num\n" \ -" .endr\n" \ -" .equ .L__reg_num_xzr, 31\n" - #define DEFINE_MRS_S \ - __DEFINE_MRS_MSR_S_REGNUM \ + __DEFINE_ASM_GPR_NUMS \ " .macro mrs_s, rt, sreg\n" \ - __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ + __emit_inst(0xd5200000|(\\sreg)|(.L__gpr_num_\\rt)) \ " .endm\n" #define DEFINE_MSR_S \ - __DEFINE_MRS_MSR_S_REGNUM \ + __DEFINE_ASM_GPR_NUMS \ " .macro msr_s, sreg, rt\n" \ - __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ + __emit_inst(0xd5000000|(\\sreg)|(.L__gpr_num_\\rt)) \ " .endm\n" #define UNDEFINE_MRS_S \ @@ -1291,6 +827,15 @@ par; \ }) +#define SYS_FIELD_GET(reg, field, val) \ + FIELD_GET(reg##_##field##_MASK, val) + +#define SYS_FIELD_PREP(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, val) + +#define SYS_FIELD_PREP_ENUM(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + #endif #endif /* __ASM_SYSREG_H */ diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c index b90580840b22..8e5bd07a3727 100644 --- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c @@ -146,8 +146,8 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); - el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val); - return el0 == ID_AA64PFR0_ELx_64BIT_ONLY; + el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); + return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY; } int main(void) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index f5b6cb3a0019..866002917441 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -116,12 +116,12 @@ static void reset_debug_state(void) /* Reset all bcr/bvr/wcr/wvr registers */ dfr0 = read_sysreg(id_aa64dfr0_el1); - brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0); + brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0); for (i = 0; i <= brps; i++) { write_dbgbcr(i, 0); write_dbgbvr(i, 0); } - wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0); + wrps = 
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0); for (i = 0; i <= wrps; i++) { write_dbgwcr(i, 0); write_dbgwvr(i, 0); @@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt) static int debug_version(uint64_t id_aa64dfr0) { - return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0); + return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0); } static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) @@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0) int b, w, c; /* Number of breakpoints */ - brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1; + brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1; __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required"); /* Number of watchpoints */ - wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1; + wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1; /* Number of context aware breakpoints */ - ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1; + ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1; pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__, brp_num, wrp_num, ctx_brp_num); diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 47bb914ab2fa..975d28be3cca 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -96,14 +96,14 @@ static bool guest_check_lse(void) uint64_t isar0 = read_sysreg(id_aa64isar0_el1); uint64_t atomic; - atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0); + atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0); return atomic >= 2; } static bool guest_check_dc_zva(void) { uint64_t dczid = read_sysreg(dczid_el0); - uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid); + uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid); return dzp == 0; } @@ -196,7 +196,7 @@ static bool guest_set_ha(void) uint64_t hadbs, tcr; /* Skip if HA is not supported. 
*/ - hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1); + hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1); if (hadbs == 0) return false; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 3a0259e25335..6fe12e985ba5 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -518,9 +518,9 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, err = ioctl(vcpu_fd, KVM_GET_ONE_REG, ®); TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd)); - *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf; - *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0; - *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0; + *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val) != 0xf; + *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val) == 0; + *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val) != 0; close(vcpu_fd); close(vm_fd); -- Gitee From a342d7f2215493de73506870ef08bcfcb41e3655 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:36 +0000 Subject: [PATCH 10/75] tools: arm64: Add a Makefile for generating sysreg-defs.h ANBZ: #25252 commit 02e85f74668e1aa0062708b58d8aef020054e56c upstream. Use a common Makefile for generating sysreg-defs.h, which will soon be needed by perf and KVM selftests. The naming scheme of the generated macros is not expected to change, so just refer to the canonical script/data in the kernel source rather than copying to tools. Co-developed-by: Jing Zhang Signed-off-by: Jing Zhang Reviewed-by: Mark Brown Reviewed-by: Eric Auger Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- tools/arch/arm64/include/.gitignore | 1 + tools/arch/arm64/tools/Makefile | 38 +++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 tools/arch/arm64/include/.gitignore create mode 100644 tools/arch/arm64/tools/Makefile diff --git a/tools/arch/arm64/include/.gitignore b/tools/arch/arm64/include/.gitignore new file mode 100644 index 000000000000..9ab870da897d --- /dev/null +++ b/tools/arch/arm64/include/.gitignore @@ -0,0 +1 @@ +generated/ diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile new file mode 100644 index 000000000000..f867e6036c62 --- /dev/null +++ b/tools/arch/arm64/tools/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 + +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + +include $(srctree)/tools/scripts/Makefile.include + +AWK ?= awk +MKDIR ?= mkdir +RM ?= rm + +ifeq ($(V),1) +Q = +else +Q = @ +endif + +arm64_tools_dir = $(srctree)/arch/arm64/tools +arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg +arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk +arm64_generated_dir = $(srctree)/tools/arch/arm64/include/generated +arm64_sysreg_defs = $(arm64_generated_dir)/asm/sysreg-defs.h + +all: $(arm64_sysreg_defs) + @: + +$(arm64_sysreg_defs): $(arm64_gen_sysreg) $(arm64_sysreg_tbl) + $(Q)$(MKDIR) -p $(dir $@) + $(QUIET_GEN)$(AWK) -f $^ > $@ + +clean: + $(Q)$(RM) -rf $(arm64_generated_dir) + +.PHONY: all clean -- Gitee From 
ac0519fea970945934a94dbb20910b9591c10601 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:37 +0000 Subject: [PATCH 11/75] perf build: Generate arm64's sysreg-defs.h and add to include path ANBZ: #25252 commit e2bdd172e6652c2f5554d125a5048bc9f9b0dfa3 upstream. Start generating sysreg-defs.h in anticipation of updating sysreg.h to a version that needs the generated output. Acked-by: Arnaldo Carvalho de Melo Acked-by: Namhyung Kim Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- tools/perf/Makefile.perf | 15 +++++++++++++-- tools/perf/util/Build | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index ff72a0d9dea0..22ac49001915 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -472,6 +472,15 @@ drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh # Create output directory if not already present _dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)') +arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools + +arm64-sysreg-defs: FORCE + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) + +arm64-sysreg-defs-clean: + $(call QUIET_CLEAN,arm64-sysreg-defs) + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) clean > /dev/null + $(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl) $(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@ @@ -745,7 +754,9 @@ endif __build-dir = $(subst $(OUTPUT),,$(dir $@)) build-dir = $(or $(__build-dir),.) -prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \ +prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders \ + arm64-sysreg-defs \ + $(drm_ioctl_array) \ $(fadvise_advice_array) \ $(fsconfig_arrays) \ $(fsmount_arrays) \ @@ -1154,7 +1165,7 @@ endif # BUILD_BPF_SKEL bpf-skel-clean: $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h -clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean +clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS) $(Q)find $(or $(OUTPUT),.) 
-name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete $(Q)$(RM) $(OUTPUT).config-detected diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 89a051732e87..244bd568cd19 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -346,7 +346,7 @@ CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE -CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ +CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(srctree)/tools/arch/arm64/include/generated/ $(OUTPUT)util/argv_split.o: ../lib/argv_split.c FORCE $(call rule_mkdir) -- Gitee From 20cd353bb2fefd50aeb87c3b3d1d4258cac7ab33 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:38 +0000 Subject: [PATCH 12/75] KVM: selftests: Generate sysreg-defs.h and add to include path ANBZ: #25252 commit 9697d84cc3b6d9bff4b1fbffc10a4bb1398af9ba upstream. Start generating sysreg-defs.h for arm64 builds in anticipation of updating sysreg.h to a version that depends on it. Reviewed-by: Mark Brown Reviewed-by: Eric Auger Tested-by: Eric Auger Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- tools/testing/selftests/kvm/Makefile | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b886931408db..78e1bb5973a0 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -17,6 +17,17 @@ else ARCH_DIR := $(ARCH) endif +ifeq ($(ARCH),arm64) +arm64_tools_dir := $(top_srcdir)/tools/arch/arm64/tools/ +GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/ +CFLAGS += -I$(GEN_HDRS) + +prepare: + $(MAKE) -C $(arm64_tools_dir) +else +prepare: +endif + LIBKVM += lib/assert.c LIBKVM += lib/elf.c LIBKVM += lib/guest_modes.c @@ -257,13 +268,18 @@ $(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c $(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS) $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@ -EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) $(SPLIT_TESTS_OBJS) cscope.* +EXTRA_CLEAN += $(GEN_HDRS) \ + $(LIBKVM_OBJS) \ + $(SPLIT_TESTS_OBJS) \ + $(TEST_DEP_FILES) \ + $(TEST_GEN_OBJ) \ + cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) -$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c +$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c prepare $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S +$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S prepare $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ # Compile the string overrides as freestanding to prevent the compiler from @@ -275,6 +291,7 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) $(TEST_GEN_PROGS): $(LIBKVM_OBJS) $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS) +$(TEST_GEN_OBJ): prepare cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. 
cscope: -- Gitee From 4654c68b1d6d0aabff8eec51f5b838cda7739358 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 21 Nov 2023 19:29:55 +0000 Subject: [PATCH 13/75] tools perf: Add arm64 sysreg files to MANIFEST ANBZ: #25252 commit ef5c958090a909c9f2ab717ba6abb86869e42da7 upstream. Ian pointed out that source tarballs are incomplete as of commit e2bdd172e665 ("perf build: Generate arm64's sysreg-defs.h and add to include path"), since the source files needed from the kernel tree do not appear in the manifest. Add them. Reported-by: Ian Rogers Fixes: e2bdd172e665 ("perf build: Generate arm64's sysreg-defs.h and add to include path") Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20231121192956.919380-2-oliver.upton@linux.dev Signed-off-by: Namhyung Kim Signed-off-by: Wei Chen --- tools/perf/MANIFEST | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 1da7f4b91b4f..dc42de1785ce 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -1,3 +1,5 @@ +arch/arm64/tools/gen-sysreg.awk +arch/arm64/tools/sysreg tools/perf tools/arch tools/scripts -- Gitee From 69c7bc75f1ffaf94dcf386c24f3b997a518df6a7 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 21 Nov 2023 19:29:56 +0000 Subject: [PATCH 14/75] perf build: Ensure sysreg-defs Makefile respects output dir ANBZ: #25252 commit a29ee6aea7030786a63fde0d6d83a8f477b060fb upstream. Currently the sysreg-defs are written out to the source tree unconditionally, ignoring the specified output directory. Correct the build rule to emit the header to the output directory. Opportunistically reorganize the rules to avoid interleaving with the set of beauty make rules. Reported-by: Ian Rogers Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20231121192956.919380-3-oliver.upton@linux.dev Signed-off-by: Namhyung Kim Signed-off-by: Wei Chen --- tools/perf/Makefile.perf | 24 +++++++++++++++--------- tools/perf/util/Build | 2 +- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 22ac49001915..f0e7591bc459 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -456,6 +456,21 @@ export INSTALL SHELL_PATH SHELL = $(SHELL_PATH) +arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools +ifneq ($(OUTPUT),) + arm64_gen_sysreg_outdir := $(OUTPUT) +else + arm64_gen_sysreg_outdir := $(CURDIR) +endif + +arm64-sysreg-defs: FORCE + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) + +arm64-sysreg-defs-clean: + $(call QUIET_CLEAN,arm64-sysreg-defs) + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \ + clean > /dev/null + beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/ linux_uapi_dir := $(srctree)/tools/include/uapi/linux asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic @@ -472,15 +487,6 @@ drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh # Create output directory if not already present _dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)') -arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools - -arm64-sysreg-defs: FORCE - $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) - -arm64-sysreg-defs-clean: - $(call QUIET_CLEAN,arm64-sysreg-defs) - $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) clean > /dev/null - $(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl) $(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@ diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 
244bd568cd19..2187f81a4891 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -346,7 +346,7 @@ CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE -CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(srctree)/tools/arch/arm64/include/generated/ +CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(OUTPUT)arch/arm64/include/generated/ $(OUTPUT)util/argv_split.o: ../lib/argv_split.c FORCE $(call rule_mkdir) -- Gitee From e9eb33ecbd4a2393b880e5e69c4a0ca95d7b4db3 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:41 +0000 Subject: [PATCH 15/75] KVM: arm64: PMU: Introduce helpers to set the guest's PMU ANBZ: #25252 commit 1616ca6f3c10723c1b60ae44724212fae88f502d upstream. Introduce new helper functions to set the guest's PMU (kvm->arch.arm_pmu) either to a default probed instance or to a caller requested one, and use it when the guest's PMU needs to be set. These helpers will make it easier for the following patches to modify the relevant code. No functional change intended. Reviewed-by: Sebastian Ott Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-2-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/pmu-emul.c | 50 +++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 05e20825a59a..28a4566d2e0f 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -841,6 +841,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) +{ + lockdep_assert_held(&kvm->arch.config_lock); + + kvm->arch.arm_pmu = arm_pmu; +} + +/** + * kvm_arm_set_default_pmu - No PMU set, get the default one. + * @kvm: The kvm pointer + * + * The observant among you will notice that the supported_cpus + * mask does not get updated for the default PMU even though it + * is quite possible the selected instance supports only a + * subset of cores in the system. This is intentional, and + * upholds the preexisting behavior on heterogeneous systems + * where vCPUs can be scheduled on any core but the guest + * counters could stop working. + */ +static int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu(); + + if (!arm_pmu) + return -ENODEV; + + kvm_arm_set_pmu(kvm, arm_pmu); + return 0; +} + static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) { struct kvm *kvm = vcpu->kvm; @@ -860,7 +890,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) break; } - kvm->arch.arm_pmu = arm_pmu; + kvm_arm_set_pmu(kvm, arm_pmu); cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus); ret = 0; break; @@ -884,20 +914,10 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -EBUSY; if (!kvm->arch.arm_pmu) { - /* - * No PMU set, get the default one. 
- * - * The observant among you will notice that the supported_cpus - * mask does not get updated for the default PMU even though it - * is quite possible the selected instance supports only a - * subset of cores in the system. This is intentional, and - * upholds the preexisting behavior on heterogeneous systems - * where vCPUs can be scheduled on any core but the guest - * counters could stop working. - */ - kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); - if (!kvm->arch.arm_pmu) - return -ENODEV; + int ret = kvm_arm_set_default_pmu(kvm); + + if (ret) + return ret; } switch (attr->attr) { -- Gitee From 0dcf9ca8172b9869efcd2faa6060b3ce4e75366a Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:42 +0000 Subject: [PATCH 16/75] KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler ANBZ: #25252 commit 427733579744ef22ee6d0da9907560d79d937458 upstream. Future changes to KVM's sysreg emulation will rely on having a valid PMU instance to determine the number of implemented counters (PMCR_EL0.N). This is earlier than when userspace is expected to modify the vPMU device attributes, where the default is selected today. Select the default PMU when handling KVM_ARM_VCPU_INIT such that it is available in time for sysreg emulation. Reviewed-by: Sebastian Ott Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-3-rananta@google.com [Oliver: rewrite changelog] Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/arm.c | 19 +++++++++++++++++++ arch/arm64/kvm/pmu-emul.c | 16 ++++------------ include/kvm/arm_pmu.h | 6 ++++++ 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 087e15406822..8a3d3e29a729 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1241,6 +1241,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); } +static int kvm_setup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + int ret = 0; + + /* + * When the vCPU has a PMU, but no PMU is set for the guest + * yet, set the default one. + */ + if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) + ret = kvm_arm_set_default_pmu(kvm); + + return ret; +} + static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { @@ -1256,6 +1271,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); + ret = kvm_setup_vcpu(vcpu); + if (ret) + goto out_unlock; + /* Now we know what it is, we can reset it. */ ret = kvm_reset_vcpu(vcpu); if (ret) { diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 28a4566d2e0f..21319b0faaaa 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -684,10 +684,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) * It is still necessary to get a valid cpu, though, to probe for the * default PMU instance as userspace is not required to specify a PMU * type. In order to uphold the preexisting behavior KVM selects the - * PMU instance for the core where the first call to the - * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case - * would be a user with disdain of all things big.LITTLE that affines - * the VMM to a particular cluster of cores. + * PMU instance for the core during vcpu init. 
A dependent use + * case would be a user with disdain of all things big.LITTLE that + * affines the VMM to a particular cluster of cores. * * In any case, userspace should just do the sane thing and use the UAPI * to select a PMU type directly. But, be wary of the baggage being @@ -860,7 +859,7 @@ static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) * where vCPUs can be scheduled on any core but the guest * counters could stop working. */ -static int kvm_arm_set_default_pmu(struct kvm *kvm) +int kvm_arm_set_default_pmu(struct kvm *kvm) { struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu(); @@ -913,13 +912,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (vcpu->arch.pmu.created) return -EBUSY; - if (!kvm->arch.arm_pmu) { - int ret = kvm_arm_set_default_pmu(kvm); - - if (ret) - return ret; - } - switch (attr->attr) { case KVM_ARM_VCPU_PMU_V3_IRQ: { int __user *uaddr = (int __user *)(long)attr->addr; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 9ff5c8dcb1e3..7acf753b480b 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -104,6 +104,7 @@ void kvm_vcpu_pmu_resync_el0(void); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +int kvm_arm_set_default_pmu(struct kvm *kvm); #else struct kvm_pmu { @@ -177,6 +178,11 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void) } static inline void kvm_vcpu_pmu_resync_el0(void) {} +static inline int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + return -ENODEV; +} + #endif #endif -- Gitee From 41fe5046b682bc461f6d65783e9dbb0d0340b919 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:43 +0000 Subject: [PATCH 17/75] KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0 ANBZ: #25252 commit 57fc267f1b5caa56dfb46f60e20e673cbc4cc4a8 upstream. Add a helper to read a vCPU's PMCR_EL0, and use it whenever KVM reads a vCPU's PMCR_EL0. Currently, the PMCR_EL0 value is tracked per vCPU. The following patches will make (only) PMCR_EL0.N track per guest. Having the new helper will be useful to combine the PMCR_EL0.N field (tracked per guest) and the other fields (tracked per vCPU) to provide the value of PMCR_EL0. No functional change intended. 
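For illustration (a sketch, not part of this patch's diff), a caller that needs the number of implemented event counters is then expected to go through the helper and the existing PMCR field macros rather than read __vcpu_sys_reg() directly, mirroring the pmu_counter_idx_valid() hunk below:

	u64 pmcr = kvm_vcpu_read_pmcr(vcpu);
	u8 n = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;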
Reviewed-by: Sebastian Ott Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-4-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/arm.c | 3 +-- arch/arm64/kvm/pmu-emul.c | 21 +++++++++++++++------ arch/arm64/kvm/sys_regs.c | 6 +++--- include/kvm/arm_pmu.h | 6 ++++++ 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 8a3d3e29a729..17f07630dcec 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -813,8 +813,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) - kvm_pmu_handle_pmcr(vcpu, - __vcpu_sys_reg(vcpu, PMCR_EL0)); + kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) kvm_vcpu_pmu_restore_guest(vcpu); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 21319b0faaaa..eddc6517d089 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -67,7 +67,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) { - u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0); + u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc)); return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) || (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC)); @@ -245,7 +245,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { - u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; + u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT; val &= ARMV8_PMU_PMCR_N_MASK; if (val == 0) @@ -267,7 +267,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) if (!kvm_vcpu_has_pmu(vcpu)) return; - if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) + if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val) return; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { @@ -319,7 +319,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) { u64 reg = 0; - if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { + if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); } @@ -420,7 +420,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, { int i; - if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) + if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) return; /* Weed out disabled counters */ @@ -563,7 +563,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc) { struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); - return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && + return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) && (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)); } @@ -1040,3 +1040,12 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -ENXIO; } + +/** + * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU + * @vcpu: The vcpu pointer + */ +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return __vcpu_sys_reg(vcpu, PMCR_EL0); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7995914e5fe4..b08f8d13ecd0 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -853,7 +853,7 @@ static bool 
access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * Only update writeable bits of PMCR (continuing into * kvm_pmu_handle_pmcr() as well) */ - val = __vcpu_sys_reg(vcpu, PMCR_EL0); + val = kvm_vcpu_read_pmcr(vcpu); val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; if (!kvm_supports_32bit_el0()) @@ -861,7 +861,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ - val = __vcpu_sys_reg(vcpu, PMCR_EL0) + val = kvm_vcpu_read_pmcr(vcpu) & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); p->regval = val; } @@ -910,7 +910,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) { u64 pmcr, val; - pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); + pmcr = kvm_vcpu_read_pmcr(vcpu); val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { kvm_inject_undefined(vcpu); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 7acf753b480b..708d2018b4f4 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -106,6 +106,7 @@ void kvm_vcpu_pmu_resync_el0(void); u8 kvm_arm_pmu_get_pmuver_limit(void); int kvm_arm_set_default_pmu(struct kvm *kvm); +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; @@ -183,6 +184,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm) return -ENODEV; } +static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return 0; +} + #endif #endif -- Gitee From 0dcea924cea3feebdf314c6721aef113c1d5787e Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:44 +0000 Subject: [PATCH 18/75] KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU ANBZ: #25252 commit 4d20debf9ca160720a0b01ba4f2dc3d62296c4d1 upstream. The number of PMU event counters is indicated in PMCR_EL0.N. For a vCPU with PMUv3 configured, the value is set to the same value as the current PE on every vCPU reset. Unless the vCPU is pinned to PEs that have the PMU associated to the guest from the initial vCPU reset, the value might be different from the PMU's PMCR_EL0.N on heterogeneous PMU systems. Fix this by setting the vCPU's PMCR_EL0.N to the PMU's PMCR_EL0.N value. Track the PMCR_EL0.N per guest, as only one PMU can be set for the guest (PMCR_EL0.N must be the same for all vCPUs of the guest), and it is convenient for updating the value. To achieve this, the patch introduces a helper, kvm_arm_pmu_get_max_counters(), that reads the maximum number of counters from the arm_pmu associated to the VM. Make the function global as upcoming patches will be interested to know the value while setting the PMCR.N of the guest from userspace. KVM does not yet support userspace modifying PMCR_EL0.N. The following patch will add support for that. 
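As a worked example of what the per-guest value enables (a sketch mirroring the reset_pmu_reg() hunk in the diff below, not additional code), the mask of valid counters can now be derived from the tracked pmcr_n instead of the host's PMCR_EL0:

	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);	/* the cycle counter is always implemented */
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);	/* general-purpose counters 0..n-1 */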
Reviewed-by: Sebastian Ott Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-5-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/pmu-emul.c | 21 ++++++++++++++++++++- arch/arm64/kvm/sys_regs.c | 28 ++++++++++++++-------------- include/kvm/arm_pmu.h | 6 ++++++ 4 files changed, 43 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a0b36e4879d..57481e6cabe7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -256,6 +256,9 @@ struct kvm_arch { cpumask_var_t supported_cpus; + /* PMCR_EL0.N value for the guest */ + u8 pmcr_n; + /* Hypercall features firmware registers' descriptor */ struct kvm_smccc_features smccc_feat; struct maple_tree smccc_filter; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index eddc6517d089..6435107818e1 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -840,11 +840,27 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +/** + * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters. + * @kvm: The kvm pointer + */ +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + struct arm_pmu *arm_pmu = kvm->arch.arm_pmu; + + /* + * The arm_pmu->num_events considers the cycle counter as well. + * Ignore that and return only the general-purpose counters. + */ + return arm_pmu->num_events - 1; +} + static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) { lockdep_assert_held(&kvm->arch.config_lock); kvm->arch.arm_pmu = arm_pmu; + kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm); } /** @@ -1047,5 +1063,8 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) */ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { - return __vcpu_sys_reg(vcpu, PMCR_EL0); + u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) & + ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + + return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b08f8d13ecd0..1dd394f515f8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -752,12 +752,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX); - /* No PMU available, any PMU reg may UNDEF... */ - if (!kvm_arm_support_pmu_v3()) - return 0; - - n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT; - n &= ARMV8_PMU_PMCR_N_MASK; + n = vcpu->kvm->arch.pmcr_n; if (n) mask |= GENMASK(n - 1, 0); @@ -793,17 +788,15 @@ static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 pmcr; + u64 pmcr = 0; - /* No PMU available, PMCR_EL0 may UNDEF... */ - if (!kvm_arm_support_pmu_v3()) - return 0; - - /* Only preserve PMCR_EL0.N, and reset the rest to 0 */ - pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); if (!kvm_supports_32bit_el0()) pmcr |= ARMV8_PMU_PMCR_LC; + /* + * The value of PMCR.N field is included when the + * vCPU register is read via kvm_vcpu_read_pmcr(). 
+ */ __vcpu_sys_reg(vcpu, r->reg) = pmcr; return __vcpu_sys_reg(vcpu, r->reg); @@ -1134,6 +1127,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + *val = kvm_vcpu_read_pmcr(vcpu); + return 0; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ @@ -2285,7 +2285,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SVCR), undef_access }, { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, - .reset = reset_pmcr, .reg = PMCR_EL0 }, + .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, { PMU_SYS_REG(PMCNTENCLR_EL0), diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 708d2018b4f4..64130a2acbae 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -105,6 +105,7 @@ void kvm_vcpu_pmu_resync_el0(void); u8 kvm_arm_pmu_get_pmuver_limit(void); int kvm_arm_set_default_pmu(struct kvm *kvm); +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else @@ -184,6 +185,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm) return -ENODEV; } +static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + return 0; +} + static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { return 0; -- Gitee From cfba95e610b178f240dd9336753f4414cbf1d6a9 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:45 +0000 Subject: [PATCH 19/75] KVM: arm64: Add {get,set}_user for PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} ANBZ: #25252 commit a45f41d754e0b37de4b7dc1fb3c6b7a1285882fc upstream. For unimplemented counters, the bits in PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} registers are expected to RAZ. To honor this, explicitly implement the {get,set}_user functions for these registers to mask out unimplemented counters for userspace reads and writes. 
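The masking is symmetric for the two directions (a simplified sketch; the full get_pmreg()/set_pmreg() implementations in the diff below additionally dispatch on whether a SET or CLR register is being written):

	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;	/* reads: unimplemented counters are RAZ */
	val &= mask;					/* writes: bits for unimplemented counters are ignored */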
Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-6-rananta@google.com [Oliver: drop unnecessary locking] Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/sys_regs.c | 51 ++++++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1dd394f515f8..30ccaf1ab032 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1018,6 +1018,39 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val) +{ + bool set; + + val &= kvm_pmu_valid_counter_mask(vcpu); + + switch (r->reg) { + case PMOVSSET_EL0: + /* CRm[1] being set indicates a SET register, and CLR otherwise */ + set = r->CRm & 2; + break; + default: + /* Op2[0] being set indicates a SET register, and CLR otherwise */ + set = r->Op2 & 1; + break; + } + + if (set) + __vcpu_sys_reg(vcpu, r->reg) |= val; + else + __vcpu_sys_reg(vcpu, r->reg) &= ~val; + + return 0; +} + +static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + *val = __vcpu_sys_reg(vcpu, r->reg) & mask; + return 0; +} + static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -2231,9 +2264,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMBIDR_EL1 is not trapped */ { PMU_SYS_REG(PMINTENSET_EL1), - .access = access_pminten, .reg = PMINTENSET_EL1 }, + .access = access_pminten, .reg = PMINTENSET_EL1, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMINTENCLR_EL1), - .access = access_pminten, .reg = PMINTENSET_EL1 }, + .access = access_pminten, .reg = PMINTENSET_EL1, + .get_user = get_pmreg, .set_user = set_pmreg }, { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, @@ -2287,11 +2322,14 @@ static const struct sys_reg_desc sys_reg_descs[] = { { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), - .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, + .access = access_pmcnten, .reg = PMCNTENSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMCNTENCLR_EL0), - .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, + .access = access_pmcnten, .reg = PMCNTENSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMOVSCLR_EL0), - .access = access_pmovs, .reg = PMOVSSET_EL0 }, + .access = access_pmovs, .reg = PMOVSSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, /* * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was * previously (and pointlessly) advertised in the past... 
@@ -2319,7 +2357,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr, .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 }, { PMU_SYS_REG(PMOVSSET_EL0), - .access = access_pmovs, .reg = PMOVSSET_EL0 }, + .access = access_pmovs, .reg = PMOVSSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, -- Gitee From 80035018f5771ba18743a17bab68898b30aaebc3 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:46 +0000 Subject: [PATCH 20/75] KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run ANBZ: #25252 commit 27131b199f9fdc0e15baa0ff9d1695b54a96e39c upstream. For unimplemented counters, the registers PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} are expected to have the corresponding bits RAZ. Hence to ensure correct KVM's PMU emulation, mask out the RES0 bits. Defer this work to the point that userspace can no longer change the number of advertised PMCs. Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-7-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/pmu-emul.c | 11 +++++++++++ include/kvm/arm_pmu.h | 3 ++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 17f07630dcec..0a2f06c2a634 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -813,7 +813,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) - kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); + kvm_vcpu_reload_pmu(vcpu); if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) kvm_vcpu_pmu_restore_guest(vcpu); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6435107818e1..59ed6598232e 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -752,6 +752,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) return val & mask; } +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); + + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask; + __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask; + __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask; +} + int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) { if (!kvm_vcpu_has_pmu(vcpu)) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 64130a2acbae..6b3d34724594 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -13,7 +13,6 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) - struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -66,6 +65,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, @@ -174,6 +174,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {} static inline void 
kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {} static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; -- Gitee From 2a726297e6ff3b0d69b1f46bf2fd3268329f9b43 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:47 +0000 Subject: [PATCH 21/75] KVM: arm64: PMU: Allow userspace to limit PMCR_EL0.N for the guest ANBZ: #25252 commit ea9ca904d24ff15ded92fd76c16462c47bcae2f8 upstream. KVM does not yet support userspace modifying PMCR_EL0.N (with the previous patch, KVM ignores what is written by userspace). Add support for userspace limiting PMCR_EL0.N. Disallow userspace to set PMCR_EL0.N to a value that is greater than the host value as KVM doesn't support more event counters than what the host HW implements. Also, make this register immutable after the VM has started running. To maintain the existing expectations, instead of returning an error, KVM returns a success for these two cases. Finally, ignore writes to read-only bits that are cleared on vCPU reset, and RES{0,1} bits (including writable bits that KVM doesn't support yet), as those bits shouldn't be modified (at least with the current KVM). Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-8-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/sys_regs.c | 46 +++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 30ccaf1ab032..054a3cd31f53 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -750,9 +750,9 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX); + u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); + u8 n = vcpu->kvm->arch.pmcr_n; - n = vcpu->kvm->arch.pmcr_n; if (n) mask |= GENMASK(n - 1, 0); @@ -1167,6 +1167,44 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, return 0; } +static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&kvm->arch.config_lock); + + /* + * The vCPU can't have more counters than the PMU hardware + * implements. Ignore this error to maintain compatibility + * with the existing KVM behavior. + */ + if (!kvm_vm_has_ran_once(kvm) && + new_n <= kvm_arm_pmu_get_max_counters(kvm)) + kvm->arch.pmcr_n = new_n; + + mutex_unlock(&kvm->arch.config_lock); + + /* + * Ignore writes to RES0 bits, read only bits that are cleared on + * vCPU reset, and writable bits that KVM doesn't support yet. + * (i.e. only PMCR.N and bits [7:0] are mutable from userspace) + * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU. + * But, we leave the bit as it is here, as the vCPU's PMUver might + * be changed later (NOTE: the bit will be cleared on first vCPU run + * if necessary). 
+ */ + val &= ARMV8_PMU_PMCR_MASK; + + /* The LC bit is RES1 when AArch32 is not supported */ + if (!kvm_supports_32bit_el0()) + val |= ARMV8_PMU_PMCR_LC; + + __vcpu_sys_reg(vcpu, r->reg) = val; + return 0; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ @@ -2319,8 +2357,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CTR_EL0), access_ctr }, { SYS_DESC(SYS_SVCR), undef_access }, - { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, - .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, + { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, + .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), .access = access_pmcnten, .reg = PMCNTENSET_EL0, .get_user = get_pmreg, .set_user = set_pmreg }, -- Gitee From 86f0357aa43895b4a09f245b9f8a2af1b0171485 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:48 +0000 Subject: [PATCH 22/75] tools: Import arm_pmuv3.h ANBZ: #25252 commit 9f4b3273dfbe0ecb628b65fe9a80aae17caba20f upstream. Import kernel's include/linux/perf/arm_pmuv3.h, with the definition of PMEVN_SWITCH() additionally including an assert() for the 'default' case. The following patches will use macros defined in this header. Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-9-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- tools/include/perf/arm_pmuv3.h | 308 +++++++++++++++++++++++++++++++++ 1 file changed, 308 insertions(+) create mode 100644 tools/include/perf/arm_pmuv3.h diff --git a/tools/include/perf/arm_pmuv3.h b/tools/include/perf/arm_pmuv3.h new file mode 100644 index 000000000000..e822d49fb5b8 --- /dev/null +++ b/tools/include/perf/arm_pmuv3.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#ifndef __PERF_ARM_PMUV3_H +#define __PERF_ARM_PMUV3_H + +#include +#include + +#define ARMV8_PMU_MAX_COUNTERS 32 +#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) + +/* + * Common architectural and microarchitectural event numbers. 
+ */ +#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005 +#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006 +#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007 +#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008 +#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009 +#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A +#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B +#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C +#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D +#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E +#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010 +#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011 +#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012 +#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018 +#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019 +#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A +#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B +#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C +#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D +#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020 +#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021 +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022 +#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023 +#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C +#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D +#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E +#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F +#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033 +#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034 +#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039 +#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A +#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B +#define ARMV8_PMUV3_PERFCTR_STALL 0x003C +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F + +/* Statistical profiling extension microarchitectural events */ +#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000 +#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001 +#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002 +#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003 + +/* AMUv1 architecture events */ +#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004 +#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005 + +/* 
long-latency read miss events */ +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B + +/* Trace buffer events */ +#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C +#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E + +/* Trace unit events */ +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B + +/* additional latency from alignment events */ +#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020 +#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021 +#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022 + +/* Armv8.5 Memory Tagging Extension events */ +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026 + +/* ARMv8 recommended implementation defined event types */ +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048 + +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053 + +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058 + +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A + +#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C +#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D +#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E +#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F +#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070 +#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071 +#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072 +#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073 +#define 
ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074 +#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075 +#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076 +#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077 +#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078 +#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079 +#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A + +#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C +#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D +#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E + +#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081 +#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082 +#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083 +#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084 + +#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086 +#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087 +#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088 + +#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F +#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090 +#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8 + +/* + * Per-CPU PMCR: config reg + */ +#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ +#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ +#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ +#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ +#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ +#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ +#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ +#define ARMV8_PMU_PMCR_N_MASK 0x1f +#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ + +/* + * PMOVSR: counters overflow flag status reg + */ +#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK + +/* + * PMXEVTYPER: Event selection reg + */ +#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ +#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ + +/* + * Event filters for PMUv3 + */ +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) + +/* + * PMUSERENR: user enable reg + */ +#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ +#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ +#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ +#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ + +/* PMMIR_EL1.SLOTS mask */ +#define ARMV8_PMU_SLOTS_MASK 0xff + +#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 +#define ARMV8_PMU_BUS_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 +#define ARMV8_PMU_BUS_WIDTH_MASK 0xf + +/* + * This code is really good + */ + +#define 
PMEVN_CASE(n, case_macro) \
+	case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+	do { \
+		switch (x) { \
+		PMEVN_CASE(0, case_macro); \
+		PMEVN_CASE(1, case_macro); \
+		PMEVN_CASE(2, case_macro); \
+		PMEVN_CASE(3, case_macro); \
+		PMEVN_CASE(4, case_macro); \
+		PMEVN_CASE(5, case_macro); \
+		PMEVN_CASE(6, case_macro); \
+		PMEVN_CASE(7, case_macro); \
+		PMEVN_CASE(8, case_macro); \
+		PMEVN_CASE(9, case_macro); \
+		PMEVN_CASE(10, case_macro); \
+		PMEVN_CASE(11, case_macro); \
+		PMEVN_CASE(12, case_macro); \
+		PMEVN_CASE(13, case_macro); \
+		PMEVN_CASE(14, case_macro); \
+		PMEVN_CASE(15, case_macro); \
+		PMEVN_CASE(16, case_macro); \
+		PMEVN_CASE(17, case_macro); \
+		PMEVN_CASE(18, case_macro); \
+		PMEVN_CASE(19, case_macro); \
+		PMEVN_CASE(20, case_macro); \
+		PMEVN_CASE(21, case_macro); \
+		PMEVN_CASE(22, case_macro); \
+		PMEVN_CASE(23, case_macro); \
+		PMEVN_CASE(24, case_macro); \
+		PMEVN_CASE(25, case_macro); \
+		PMEVN_CASE(26, case_macro); \
+		PMEVN_CASE(27, case_macro); \
+		PMEVN_CASE(28, case_macro); \
+		PMEVN_CASE(29, case_macro); \
+		PMEVN_CASE(30, case_macro); \
+		default: \
+			WARN(1, "Invalid PMEV* index\n"); \
+			assert(0); \
+		} \
+	} while (0)
+
+#endif
-- 
Gitee

From caae21586bfb76661762f85c9b5f0463dafec785 Mon Sep 17 00:00:00 2001
From: Reiji Watanabe
Date: Fri, 20 Oct 2023 21:40:49 +0000
Subject: [PATCH 23/75] KVM: selftests: aarch64: Introduce vpmu_counter_access
 test

ANBZ: #25252

commit 8d0aebe1ca2bd6f86ee2b4de30c8a4aafcf2908b upstream.

Introduce vpmu_counter_access test for arm64 platforms.

The test configures PMUv3 for a vCPU, sets PMCR_EL0.N for the
vCPU, and checks if the guest can consistently see the same number
of the PMU event counters (PMCR_EL0.N) that userspace sets.
This test case is done with each of the PMCR_EL0.N values from
0 to 31 (with the PMCR_EL0.N values greater than the host value,
the test expects KVM_SET_ONE_REG for the PMCR_EL0 to fail).

Signed-off-by: Reiji Watanabe
Signed-off-by: Raghavendra Rao Ananta
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20231020214053.2144305-10-rananta@google.com
Signed-off-by: Oliver Upton
Signed-off-by: Wei Chen
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../kvm/aarch64/vpmu_counter_access.c         | 255 ++++++++++++++++++
 2 files changed, 256 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 78e1bb5973a0..d424e8d39582 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -160,6 +160,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
 TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
new file mode 100644
index 000000000000..4c6e1fe87e0e
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpmu_counter_access - Test vPMU event counter access
+ *
+ * Copyright (c) 2023 Google LLC.
+ *
+ * This test checks if the guest can see the same number of the PMU event
+ * counters (PMCR_EL0.N) that userspace sets.
+ * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. + */ +#include +#include +#include +#include +#include +#include + +/* The max number of the PMU event counters (excluding the cycle counter) */ +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1) + +struct vpmu_vm { + struct kvm_vm *vm; + struct kvm_vcpu *vcpu; + int gic_fd; +}; + +static struct vpmu_vm vpmu_vm; + +static uint64_t get_pmcr_n(uint64_t pmcr) +{ + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; +} + +static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) +{ + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); +} + +static void guest_sync_handler(struct ex_regs *regs) +{ + uint64_t esr, ec; + + esr = read_sysreg(esr_el1); + ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK; + __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec); +} + +/* + * The guest is configured with PMUv3 with @expected_pmcr_n number of + * event counters. + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N. + */ +static void guest_code(uint64_t expected_pmcr_n) +{ + uint64_t pmcr, pmcr_n; + + __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, + "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx", + expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS); + + pmcr = read_sysreg(pmcr_el0); + pmcr_n = get_pmcr_n(pmcr); + + /* Make sure that PMCR_EL0.N indicates the value userspace set */ + __GUEST_ASSERT(pmcr_n == expected_pmcr_n, + "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", + expected_pmcr_n, pmcr_n); + + GUEST_DONE(); +} + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL + +/* Create a VM that has one vCPU with PMUv3 configured. */ +static void create_vpmu_vm(void *guest_code) +{ + struct kvm_vcpu_init init; + uint8_t pmuver, ec; + uint64_t dfr0, irq = 23; + struct kvm_device_attr irq_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + .addr = (uint64_t)&irq, + }; + struct kvm_device_attr init_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + /* The test creates the vpmu_vm multiple times. 
Ensure a clean state */ + memset(&vpmu_vm, 0, sizeof(vpmu_vm)); + + vpmu_vm.vm = vm_create(1); + vm_init_descriptor_tables(vpmu_vm.vm); + for (ec = 0; ec < ESR_EC_NUM; ec++) { + vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec, + guest_sync_handler); + } + + /* Create vCPU with PMUv3 */ + vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); + vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code); + vcpu_init_descriptor_tables(vpmu_vm.vcpu); + vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64, + GICD_BASE_GPA, GICR_BASE_GPA); + __TEST_REQUIRE(vpmu_vm.gic_fd >= 0, + "Failed to create vgic-v3, skipping"); + + /* Make sure that PMUv3 support is indicated in the ID register */ + vcpu_get_reg(vpmu_vm.vcpu, + KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0); + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); + TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && + pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, + "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); + + /* Initialize vPMU */ + vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); + vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr); +} + +static void destroy_vpmu_vm(void) +{ + close(vpmu_vm.gic_fd); + kvm_vm_free(vpmu_vm.vm); +} + +static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) +{ + struct ucall uc; + + vcpu_args_set(vcpu, 1, pmcr_n); + vcpu_run(vcpu); + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + break; + } +} + +static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) +{ + struct kvm_vcpu *vcpu; + uint64_t pmcr, pmcr_orig; + + create_vpmu_vm(guest_code); + vcpu = vpmu_vm.vcpu; + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig); + pmcr = pmcr_orig; + + /* + * Setting a larger value of PMCR.N should not modify the field, and + * return a success. + */ + set_pmcr_n(&pmcr, pmcr_n); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr); + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); + + if (expect_fail) + TEST_ASSERT(pmcr_orig == pmcr, + "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n", + pmcr, pmcr_n); + else + TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr), + "Failed to update PMCR.N to %lu (received: %lu)\n", + pmcr_n, get_pmcr_n(pmcr)); +} + +/* + * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, + * and run the test. + */ +static void run_test(uint64_t pmcr_n) +{ + uint64_t sp; + struct kvm_vcpu *vcpu; + struct kvm_vcpu_init init; + + pr_debug("Test with pmcr_n %lu\n", pmcr_n); + + test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); + vcpu = vpmu_vm.vcpu; + + /* Save the initial sp to restore them later to run the guest again */ + vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp); + + run_vcpu(vcpu, pmcr_n); + + /* + * Reset and re-initialize the vCPU, and run the guest code again to + * check if PMCR_EL0.N is preserved. 
+ */ + vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); + aarch64_vcpu_setup(vcpu, &init); + vcpu_init_descriptor_tables(vcpu); + vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + + run_vcpu(vcpu, pmcr_n); + + destroy_vpmu_vm(); +} + +/* + * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for + * the vCPU to @pmcr_n, which is larger than the host value. + * The attempt should fail as @pmcr_n is too big to set for the vCPU. + */ +static void run_error_test(uint64_t pmcr_n) +{ + pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); + + test_create_vpmu_vm_with_pmcr_n(pmcr_n, true); + destroy_vpmu_vm(); +} + +/* + * Return the default number of implemented PMU event counters excluding + * the cycle counter (i.e. PMCR_EL0.N value) for the guest. + */ +static uint64_t get_pmcr_n_limit(void) +{ + uint64_t pmcr; + + create_vpmu_vm(guest_code); + vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); + destroy_vpmu_vm(); + return get_pmcr_n(pmcr); +} + +int main(void) +{ + uint64_t i, pmcr_n; + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); + + pmcr_n = get_pmcr_n_limit(); + for (i = 0; i <= pmcr_n; i++) + run_test(i); + + for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++) + run_error_test(i); + + return 0; +} -- Gitee From 31fc1ec21c5944d24df63a874d356cf47dd13d74 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:50 +0000 Subject: [PATCH 24/75] KVM: selftests: aarch64: vPMU register test for implemented counters ANBZ: #25252 commit ada1ae68262deea2685ccd38136ea2db233dfc4c upstream. Add a new test case to the vpmu_counter_access test to check if PMU registers or their bits for implemented counters on the vCPU are readable/writable as expected, and can be programmed to count events. Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-11-rananta@google.com Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- .../kvm/aarch64/vpmu_counter_access.c | 270 +++++++++++++++++- 1 file changed, 266 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 4c6e1fe87e0e..a579286b6f11 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -5,7 +5,8 @@ * Copyright (c) 2023 Google LLC. * * This test checks if the guest can see the same number of the PMU event - * counters (PMCR_EL0.N) that userspace sets. + * counters (PMCR_EL0.N) that userspace sets, and if the guest can access + * those counters. * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. 
*/ #include @@ -37,6 +38,255 @@ static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); } +/* Read PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */ +static inline unsigned long read_sel_evcntr(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevcntr_el0); +} + +/* Write PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */ +static inline void write_sel_evcntr(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevcntr_el0); + isb(); +} + +/* Read PMEVTYPER_EL0 through PMXEVTYPER_EL0 */ +static inline unsigned long read_sel_evtyper(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevtyper_el0); +} + +/* Write PMEVTYPER_EL0 through PMXEVTYPER_EL0 */ +static inline void write_sel_evtyper(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevtyper_el0); + isb(); +} + +static inline void enable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenset_el0); + isb(); +} + +static inline void disable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenclr_el0); + isb(); +} + +static void pmu_disable_reset(void) +{ + uint64_t pmcr = read_sysreg(pmcr_el0); + + /* Reset all counters, disabling them */ + pmcr &= ~ARMV8_PMU_PMCR_E; + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0); + isb(); +} + +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(pmevcntr##n##_el0) +static unsigned long read_pmevcntrn(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); + return 0; +} + +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, pmevcntr##n##_el0) +static void write_pmevcntrn(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); + isb(); +} + +#define READ_PMEVTYPERN(n) \ + return read_sysreg(pmevtyper##n##_el0) +static unsigned long read_pmevtypern(int n) +{ + PMEVN_SWITCH(n, READ_PMEVTYPERN); + return 0; +} + +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, pmevtyper##n##_el0) +static void write_pmevtypern(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); + isb(); +} + +/* + * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}_EL0 + * accessors that test cases will use. Each of the accessors will + * either directly reads/writes PMEV{CNTR,TYPER}_EL0 + * (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through + * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()). + * + * This is used to test that combinations of those accessors provide + * the consistent behavior. 
+ */ +struct pmc_accessor { + /* A function to be used to read PMEVTCNTR_EL0 */ + unsigned long (*read_cntr)(int idx); + /* A function to be used to write PMEVTCNTR_EL0 */ + void (*write_cntr)(int idx, unsigned long val); + /* A function to be used to read PMEVTYPER_EL0 */ + unsigned long (*read_typer)(int idx); + /* A function to be used to write PMEVTYPER_EL0 */ + void (*write_typer)(int idx, unsigned long val); +}; + +struct pmc_accessor pmc_accessors[] = { + /* test with all direct accesses */ + { read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern }, + /* test with all indirect accesses */ + { read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper }, + /* read with direct accesses, and write with indirect accesses */ + { read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper }, + /* read with indirect accesses, and write with direct accesses */ + { read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern }, +}; + +/* + * Convert a pointer of pmc_accessor to an index in pmc_accessors[], + * assuming that the pointer is one of the entries in pmc_accessors[]. + */ +#define PMC_ACC_TO_IDX(acc) (acc - &pmc_accessors[0]) + +#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \ +{ \ + uint64_t _tval = read_sysreg(regname); \ + \ + if (set_expected) \ + __GUEST_ASSERT((_tval & mask), \ + "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + _tval, mask, set_expected); \ + else \ + __GUEST_ASSERT(!(_tval & mask), \ + "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + _tval, mask, set_expected); \ +} + +/* + * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers + * are set or cleared as specified in @set_expected. + */ +static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) +{ + GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected); +} + +/* + * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding + * to the specified counter (@pmc_idx) can be read/written as expected. + * When @set_op is true, it tries to set the bit for the counter in + * those registers by writing the SET registers (the bit won't be set + * if the counter is not implemented though). + * Otherwise, it tries to clear the bits in the registers by writing + * the CLR registers. + * Then, it checks if the values indicated in the registers are as expected. + */ +static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) +{ + uint64_t pmcr_n, test_bit = BIT(pmc_idx); + bool set_expected = false; + + if (set_op) { + write_sysreg(test_bit, pmcntenset_el0); + write_sysreg(test_bit, pmintenset_el1); + write_sysreg(test_bit, pmovsset_el0); + + /* The bit will be set only if the counter is implemented */ + pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0)); + set_expected = (pmc_idx < pmcr_n) ? true : false; + } else { + write_sysreg(test_bit, pmcntenclr_el0); + write_sysreg(test_bit, pmintenclr_el1); + write_sysreg(test_bit, pmovsclr_el0); + } + check_bitmap_pmu_regs(test_bit, set_expected); +} + +/* + * Tests for reading/writing registers for the (implemented) event counter + * specified by @pmc_idx. 
+ */ +static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) +{ + uint64_t write_data, read_data; + + /* Disable all PMCs and reset all PMCs to zero. */ + pmu_disable_reset(); + + /* + * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1. + */ + + /* Make sure that the bit in those registers are set to 0 */ + test_bitmap_pmu_regs(pmc_idx, false); + /* Test if setting the bit in those registers works */ + test_bitmap_pmu_regs(pmc_idx, true); + /* Test if clearing the bit in those registers works */ + test_bitmap_pmu_regs(pmc_idx, false); + + /* + * Tests for reading/writing the event type register. + */ + + /* + * Set the event type register to an arbitrary value just for testing + * of reading/writing the register. + * Arm ARM says that for the event from 0x0000 to 0x003F, + * the value indicated in the PMEVTYPER_EL0.evtCount field is + * the value written to the field even when the specified event + * is not supported. + */ + write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED); + acc->write_typer(pmc_idx, write_data); + read_data = acc->read_typer(pmc_idx); + __GUEST_ASSERT(read_data == write_data, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); + + /* + * Tests for reading/writing the event count register. + */ + + read_data = acc->read_cntr(pmc_idx); + + /* The count value must be 0, as it is disabled and reset */ + __GUEST_ASSERT(read_data == 0, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data); + + write_data = read_data + pmc_idx + 0x12345; + acc->write_cntr(pmc_idx, write_data); + read_data = acc->read_cntr(pmc_idx); + __GUEST_ASSERT(read_data == write_data, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); +} + static void guest_sync_handler(struct ex_regs *regs) { uint64_t esr, ec; @@ -49,11 +299,14 @@ static void guest_sync_handler(struct ex_regs *regs) /* * The guest is configured with PMUv3 with @expected_pmcr_n number of * event counters. - * Check if @expected_pmcr_n is consistent with PMCR_EL0.N. + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and + * if reading/writing PMU registers for implemented counters works + * as expected. */ static void guest_code(uint64_t expected_pmcr_n) { uint64_t pmcr, pmcr_n; + int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx", @@ -67,6 +320,15 @@ static void guest_code(uint64_t expected_pmcr_n) "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", expected_pmcr_n, pmcr_n); + /* + * Tests for reading/writing PMU registers for implemented counters. + * Use each combination of PMEVT{CNTR,TYPER}_EL0 accessor functions. + */ + for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { + for (pmc = 0; pmc < pmcr_n; pmc++) + test_access_pmc_regs(&pmc_accessors[i], pmc); + } + GUEST_DONE(); } @@ -179,7 +441,7 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, * and run the test. 
*/ -static void run_test(uint64_t pmcr_n) +static void run_access_test(uint64_t pmcr_n) { uint64_t sp; struct kvm_vcpu *vcpu; @@ -246,7 +508,7 @@ int main(void) pmcr_n = get_pmcr_n_limit(); for (i = 0; i <= pmcr_n; i++) - run_test(i); + run_access_test(i); for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++) run_error_test(i); -- Gitee From 40d65abf34c974350419b7c83b03175f84028f6b Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:51 +0000 Subject: [PATCH 25/75] KVM: selftests: aarch64: vPMU register test for unimplemented counters ANBZ: #25252 commit e1cc87206348f36fb78e417b5813f78f672a4aef upstream. Add a new test case to the vpmu_counter_access test to check if PMU registers or their bits for unimplemented counters are not accessible or are RAZ, as expected. Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-12-rananta@google.com [Oliver: fix issues relating to exception return address] Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- .../kvm/aarch64/vpmu_counter_access.c | 82 +++++++++++++++++-- .../selftests/kvm/include/aarch64/processor.h | 1 + 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index a579286b6f11..8694e89684bf 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -5,8 +5,9 @@ * Copyright (c) 2023 Google LLC. * * This test checks if the guest can see the same number of the PMU event - * counters (PMCR_EL0.N) that userspace sets, and if the guest can access - * those counters. + * counters (PMCR_EL0.N) that userspace sets, if the guest can access + * those counters, and if the guest is prevented from accessing any + * other counters. * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. */ #include @@ -287,25 +288,74 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); } +#define INVALID_EC (-1ul) +uint64_t expected_ec = INVALID_EC; + static void guest_sync_handler(struct ex_regs *regs) { uint64_t esr, ec; esr = read_sysreg(esr_el1); ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK; - __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec); + + __GUEST_ASSERT(expected_ec == ec, + "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx", + regs->pc, esr, ec, expected_ec); + + /* skip the trapping instruction */ + regs->pc += 4; + + /* Use INVALID_EC to indicate an exception occurred */ + expected_ec = INVALID_EC; +} + +/* + * Run the given operation that should trigger an exception with the + * given exception class. The exception handler (guest_sync_handler) + * will reset op_end_addr to 0, expected_ec to INVALID_EC, and skip + * the instruction that trapped. + */ +#define TEST_EXCEPTION(ec, ops) \ +({ \ + GUEST_ASSERT(ec != INVALID_EC); \ + WRITE_ONCE(expected_ec, ec); \ + dsb(ish); \ + ops; \ + GUEST_ASSERT(expected_ec == INVALID_EC); \ +}) + +/* + * Tests for reading/writing registers for the unimplemented event counter + * specified by @pmc_idx (>= PMCR_EL0.N). + */ +static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx) +{ + /* + * Reading/writing the event count/type registers should cause + * an UNDEFINED exception. 
+ */ + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0)); + /* + * The bit corresponding to the (unimplemented) counter in + * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ. + */ + test_bitmap_pmu_regs(pmc_idx, 1); + test_bitmap_pmu_regs(pmc_idx, 0); } /* * The guest is configured with PMUv3 with @expected_pmcr_n number of * event counters. * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and - * if reading/writing PMU registers for implemented counters works - * as expected. + * if reading/writing PMU registers for implemented or unimplemented + * counters works as expected. */ static void guest_code(uint64_t expected_pmcr_n) { - uint64_t pmcr, pmcr_n; + uint64_t pmcr, pmcr_n, unimp_mask; int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, @@ -320,15 +370,33 @@ static void guest_code(uint64_t expected_pmcr_n) "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", expected_pmcr_n, pmcr_n); + /* + * Make sure that (RAZ) bits corresponding to unimplemented event + * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset + * to zero. + * (NOTE: bits for implemented event counters are reset to UNKNOWN) + */ + unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n); + check_bitmap_pmu_regs(unimp_mask, false); + /* * Tests for reading/writing PMU registers for implemented counters. - * Use each combination of PMEVT{CNTR,TYPER}_EL0 accessor functions. + * Use each combination of PMEV{CNTR,TYPER}_EL0 accessor functions. */ for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { for (pmc = 0; pmc < pmcr_n; pmc++) test_access_pmc_regs(&pmc_accessors[i], pmc); } + /* + * Tests for reading/writing PMU registers for unimplemented counters. + * Use each combination of PMEV{CNTR,TYPER}_EL0 accessor functions. + */ + for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { + for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++) + test_access_invalid_pmc_regs(&pmc_accessors[i], pmc); + } + GUEST_DONE(); } diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index cb537253a6b9..c42d683102c7 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -104,6 +104,7 @@ enum { #define ESR_EC_SHIFT 26 #define ESR_EC_MASK (ESR_EC_NUM - 1) +#define ESR_EC_UNKNOWN 0x0 #define ESR_EC_SVC64 0x15 #define ESR_EC_IABT 0x21 #define ESR_EC_DABT 0x25 -- Gitee From 14b55b3c4cd8260989445ed0ed18d8ae0480dc31 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:52 +0000 Subject: [PATCH 26/75] KVM: selftests: aarch64: vPMU test for validating user accesses ANBZ: #25252 commit 62708be351fe7b06be6f6fd30e95c94095bf21d3 upstream. Add a vPMU test scenario to validate the userspace accesses for the registers PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} to ensure that KVM honors the architectural definitions of these registers for a given PMCR.N. 
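To illustrate the rule being enforced, a minimal standalone sketch
(with a hypothetical helper mirroring the selftest's get_counters_mask(),
and made-up register values) looks like:

  #include <assert.h>
  #include <stdint.h>

  /* Hypothetical helper mirroring the rule (bit 31 = cycle counter) */
  static uint64_t valid_counters_mask(uint64_t n)
  {
  	uint64_t mask = 1ULL << 31;

  	if (n)
  		mask |= (1ULL << n) - 1;	/* GENMASK(n - 1, 0) */
  	return mask;
  }

  int main(void)
  {
  	/* With PMCR_EL0.N == 6, only bits [5:0] and bit 31 are valid */
  	assert(valid_counters_mask(6) == 0x8000003fULL);

  	/* Any value KVM returns must stay inside that mask */
  	uint64_t reg_val = 0x8000001fULL;	/* hypothetical read */
  	assert((reg_val & ~valid_counters_mask(6)) == 0);
  	return 0;
  }
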
Signed-off-by: Raghavendra Rao Ananta
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20231020214053.2144305-13-rananta@google.com
Signed-off-by: Oliver Upton
Signed-off-by: Wei Chen
---
 .../kvm/aarch64/vpmu_counter_access.c         | 87 ++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 8694e89684bf..5ea78986e665 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -8,6 +8,8 @@
  * counters (PMCR_EL0.N) that userspace sets, if the guest can access
  * those counters, and if the guest is prevented from accessing any
  * other counters.
+ * It also checks if the userspace accesses to the PMU registers honor the
+ * PMCR.N value that's set for the guest.
  * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
  */
 #include
@@ -20,6 +22,9 @@
 /* The max number of the PMU event counters (excluding the cycle counter) */
 #define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
 
+/* The cycle counter bit position that's common among the PMU registers */
+#define ARMV8_PMU_CYCLE_IDX		31
+
 struct vpmu_vm {
 	struct kvm_vm *vm;
 	struct kvm_vcpu *vcpu;
@@ -28,6 +33,13 @@ struct vpmu_vm {
 
 static struct vpmu_vm vpmu_vm;
 
+struct pmreg_sets {
+	uint64_t set_reg_id;
+	uint64_t clr_reg_id;
+};
+
+#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
+
 static uint64_t get_pmcr_n(uint64_t pmcr)
 {
 	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
@@ -39,6 +51,15 @@ static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
 	*pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
 }
 
+static uint64_t get_counters_mask(uint64_t n)
+{
+	uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+	if (n)
+		mask |= GENMASK(n - 1, 0);
+	return mask;
+}
+
 /* Read PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */
 static inline unsigned long read_sel_evcntr(int sel)
 {
@@ -541,6 +562,68 @@ static void run_access_test(uint64_t pmcr_n)
 	destroy_vpmu_vm();
 }
 
+static struct pmreg_sets validity_check_reg_sets[] = {
+	PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
+	PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
+	PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
+};
+
+/*
+ * Create a VM, and check if KVM handles the userspace accesses of
+ * the PMU register sets in @validity_check_reg_sets[] correctly.
+ */
+static void run_pmregs_validity_test(uint64_t pmcr_n)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+	uint64_t set_reg_id, clr_reg_id, reg_val;
+	uint64_t valid_counters_mask, max_counters_mask;
+
+	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
+	vcpu = vpmu_vm.vcpu;
+
+	valid_counters_mask = get_counters_mask(pmcr_n);
+	max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
+
+	for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
+		set_reg_id = validity_check_reg_sets[i].set_reg_id;
+		clr_reg_id = validity_check_reg_sets[i].clr_reg_id;
+
+		/*
+		 * Test if the 'set' and 'clr' variants of the registers
+		 * are initialized based on the number of valid counters.
+		 */
+		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
+		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
+			    "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);
+
+		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
+		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
+			    "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
+
+		/*
+		 * Using the 'set' variant, force-set the register to the
+		 * max number of possible counters and test if KVM discards
+		 * the bits for unimplemented counters as it should.
+		 */
+		vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);
+
+		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
+		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
+			    "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);
+
+		vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
+		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
+			    "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
+	}
+
+	destroy_vpmu_vm();
+}
+
 /*
  * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
  * the vCPU to @pmcr_n, which is larger than the host value.
@@ -575,8 +658,10 @@ int main(void)
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
 
 	pmcr_n = get_pmcr_n_limit();
-	for (i = 0; i <= pmcr_n; i++)
+	for (i = 0; i <= pmcr_n; i++) {
 		run_access_test(i);
+		run_pmregs_validity_test(i);
+	}
 
 	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
 		run_error_test(i);
-- 
Gitee

From 2b448d15c524312dc7edf78d0e818bd3be19c591 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Thu, 19 Oct 2023 18:56:17 +0000
Subject: [PATCH 27/75] KVM: arm64: Make PMEVTYPER_EL0.NSH RES0 if EL2 isn't
 advertised

ANBZ: #25252

commit bc512d6a9b92390760f0e8c895501c5016539929 upstream.

The NSH bit, which filters event counting at EL2, is required by the
architecture if an implementation has EL2. Even though KVM doesn't
support nested virt yet, it makes no effort to hide the existence of
EL2 from the ID registers. Userspace can, however, change the value of
PFR0 to hide EL2. Align KVM's sysreg emulation with the architecture
and make NSH RES0 if EL2 isn't advertised. Keep in mind the bit is
ignored when constructing the backing perf event.

While at it, build the event type mask using explicit field definitions
instead of relying on ARMV8_PMU_EVTYPE_MASK. KVM probably should've been
doing this in the first place, as it avoids changes to the
aforementioned mask affecting sysreg emulation.
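For illustration only, the resulting mask construction can be sketched
in plain, standalone C (the constants below are simplified stand-ins
for the ARMV8_PMU_* definitions, not KVM's actual helpers):

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-ins for the PMEVTYPER filter bits */
  #define EXCLUDE_EL1	(1ULL << 31)
  #define EXCLUDE_EL0	(1ULL << 30)
  #define INCLUDE_EL2	(1ULL << 27)	/* the NSH bit */
  #define EVENT_MASK	0xffffULL	/* assumes a 16-bit event space */

  /* Writable bits for the guest; NSH only if EL2 is advertised */
  static uint64_t evtyper_mask(int el2_advertised)
  {
  	uint64_t mask = EXCLUDE_EL1 | EXCLUDE_EL0 | EVENT_MASK;

  	if (el2_advertised)
  		mask |= INCLUDE_EL2;
  	return mask;
  }

  int main(void)
  {
  	/* With EL2 hidden, a guest write of NSH is silently dropped */
  	uint64_t data = INCLUDE_EL2 | 0x11;	/* NSH + CPU_CYCLES */

  	printf("stored: 0x%llx\n",
  	       (unsigned long long)(data & evtyper_mask(0)));	/* 0x11 */
  	return 0;
  }
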
Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20231019185618.3442949-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton Signed-off-by: Wei Chen --- arch/arm64/kvm/pmu-emul.c | 21 ++++++++++++++------- arch/arm64/kvm/sys_regs.c | 8 ++++++-- include/kvm/arm_pmu.h | 5 +++++ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 59ed6598232e..3714cc291ecf 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -55,6 +55,18 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) return __kvm_pmu_event_mask(pmuver); } +u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 | + kvm_pmu_event_mask(kvm); + u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1); + + if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0)) + mask |= ARMV8_PMU_INCLUDE_EL2; + + return mask; +} + /** * kvm_pmc_is_64bit - determine if counter is 64bit * @pmc: counter context @@ -651,18 +663,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) { struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); - u64 reg, mask; + u64 reg; if (!kvm_vcpu_has_pmu(vcpu)) return; - mask = ARMV8_PMU_EVTYPE_MASK; - mask &= ~ARMV8_PMU_EVTYPE_EVENT; - mask |= kvm_pmu_event_mask(vcpu->kvm); - reg = counter_index_to_evtreg(pmc->idx); - - __vcpu_sys_reg(vcpu, reg) = data & mask; + __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm); kvm_pmu_create_perf_event(pmc); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 054a3cd31f53..06c868a12f14 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -772,8 +772,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { + /* This thing will UNDEF, who cares about the reset value? */ + if (!kvm_vcpu_has_pmu(vcpu)) + return 0; + reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK; + __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); return __vcpu_sys_reg(vcpu, r->reg); } @@ -1012,7 +1016,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); kvm_vcpu_pmu_restore_guest(vcpu); } else { - p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; + p->regval = __vcpu_sys_reg(vcpu, reg); } return true; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 6b3d34724594..9d8a2f11f39b 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -104,6 +104,7 @@ void kvm_vcpu_pmu_resync_el0(void); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +u64 kvm_pmu_evtyper_mask(struct kvm *kvm); int kvm_arm_set_default_pmu(struct kvm *kvm); u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); @@ -179,6 +180,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; } +static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + return 0; +} static inline void kvm_vcpu_pmu_resync_el0(void) {} static inline int kvm_arm_set_default_pmu(struct kvm *kvm) -- Gitee From 4102730d48df23b7b60af22515a902835a8529c7 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:13 +0000 Subject: [PATCH 28/75] arm: perf: Remove inlines from arm_pmuv3.c ANBZ: #25252 commit 9343c790e6de7edd2bab17572832f4f21c748dc2 upstream. These are all static and in one compilation unit so the inline has no effect on the binary. 
Except if FTRACE is enabled, then 3 functions which were already not inlined now get the nops added which allows them to be traced. Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-2-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 46 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 5f978554db0c..f2a821c05650 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -304,12 +304,12 @@ PMU_FORMAT_ATTR(rdpmc, "config1:1"); static int sysctl_perf_user_access __read_mostly; -static inline bool armv8pmu_event_is_64bit(struct perf_event *event) +static bool armv8pmu_event_is_64bit(struct perf_event *event) { return event->attr.config1 & 0x1; } -static inline bool armv8pmu_event_want_user_access(struct perf_event *event) +static bool armv8pmu_event_want_user_access(struct perf_event *event) { return event->attr.config1 & 0x2; } @@ -401,7 +401,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver)); } -static inline bool armv8pmu_event_has_user_read(struct perf_event *event) +static bool armv8pmu_event_has_user_read(struct perf_event *event) { return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT; } @@ -411,7 +411,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event) * except when we have allocated the 64bit cycle counter (for CPU * cycles event) or when user space counter access is enabled. */ -static inline bool armv8pmu_event_is_chained(struct perf_event *event) +static bool armv8pmu_event_is_chained(struct perf_event *event) { int idx = event->hw.idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); @@ -432,36 +432,36 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) -static inline u64 armv8pmu_pmcr_read(void) +static u64 armv8pmu_pmcr_read(void) { return read_pmcr(); } -static inline void armv8pmu_pmcr_write(u64 val) +static void armv8pmu_pmcr_write(u64 val) { val &= ARMV8_PMU_PMCR_MASK; isb(); write_pmcr(val); } -static inline int armv8pmu_has_overflowed(u32 pmovsr) +static int armv8pmu_has_overflowed(u32 pmovsr) { return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; } -static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) +static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } -static inline u64 armv8pmu_read_evcntr(int idx) +static u64 armv8pmu_read_evcntr(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); return read_pmevcntrn(counter); } -static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) +static u64 armv8pmu_read_hw_counter(struct perf_event *event) { int idx = event->hw.idx; u64 val = armv8pmu_read_evcntr(idx); @@ -523,14 +523,14 @@ static u64 armv8pmu_read_counter(struct perf_event *event) return armv8pmu_unbias_long_counter(event, value); } -static inline void armv8pmu_write_evcntr(int idx, u64 value) +static void armv8pmu_write_evcntr(int idx, u64 value) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); write_pmevcntrn(counter, value); } -static inline void armv8pmu_write_hw_counter(struct perf_event *event, +static void armv8pmu_write_hw_counter(struct perf_event *event, u64 value) { int idx = event->hw.idx; @@ -556,7 +556,7 @@ static void armv8pmu_write_counter(struct perf_event *event, 
u64 value) armv8pmu_write_hw_counter(event, value); } -static inline void armv8pmu_write_evtype(int idx, u32 val) +static void armv8pmu_write_evtype(int idx, u32 val) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); @@ -564,7 +564,7 @@ static inline void armv8pmu_write_evtype(int idx, u32 val) write_pmevtypern(counter, val); } -static inline void armv8pmu_write_event_type(struct perf_event *event) +static void armv8pmu_write_event_type(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -598,7 +598,7 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event) return mask; } -static inline void armv8pmu_enable_counter(u32 mask) +static void armv8pmu_enable_counter(u32 mask) { /* * Make sure event configuration register writes are visible before we @@ -608,7 +608,7 @@ static inline void armv8pmu_enable_counter(u32 mask) write_pmcntenset(mask); } -static inline void armv8pmu_enable_event_counter(struct perf_event *event) +static void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; u32 mask = armv8pmu_event_cnten_mask(event); @@ -620,7 +620,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event) armv8pmu_enable_counter(mask); } -static inline void armv8pmu_disable_counter(u32 mask) +static void armv8pmu_disable_counter(u32 mask) { write_pmcntenclr(mask); /* @@ -630,7 +630,7 @@ static inline void armv8pmu_disable_counter(u32 mask) isb(); } -static inline void armv8pmu_disable_event_counter(struct perf_event *event) +static void armv8pmu_disable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; u32 mask = armv8pmu_event_cnten_mask(event); @@ -642,18 +642,18 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event) armv8pmu_disable_counter(mask); } -static inline void armv8pmu_enable_intens(u32 mask) +static void armv8pmu_enable_intens(u32 mask) { write_pmintenset(mask); } -static inline void armv8pmu_enable_event_irq(struct perf_event *event) +static void armv8pmu_enable_event_irq(struct perf_event *event) { u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); armv8pmu_enable_intens(BIT(counter)); } -static inline void armv8pmu_disable_intens(u32 mask) +static void armv8pmu_disable_intens(u32 mask) { write_pmintenclr(mask); isb(); @@ -662,13 +662,13 @@ static inline void armv8pmu_disable_intens(u32 mask) isb(); } -static inline void armv8pmu_disable_event_irq(struct perf_event *event) +static void armv8pmu_disable_event_irq(struct perf_event *event) { u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); armv8pmu_disable_intens(BIT(counter)); } -static inline u32 armv8pmu_getreset_flags(void) +static u32 armv8pmu_getreset_flags(void) { u32 value; -- Gitee From adbc8b6f27bdb48ab89c254356519072cac06593 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:14 +0000 Subject: [PATCH 29/75] arm: perf/kvm: Use GENMASK for ARMV8_PMU_PMCR_N ANBZ: #25252 commit 62e1f212e5fe7624249212813ee96202e0c31430 upstream. 
This is so that FIELD_GET and FIELD_PREP can be used and that the fields are in a consistent format to arm64/tools/sysreg Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-3-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/kvm/pmu-emul.c | 8 +++----- arch/arm64/kvm/sys_regs.c | 4 ++-- drivers/perf/arm_pmuv3.c | 4 ++-- include/linux/perf/arm_pmuv3.h | 3 +-- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 3714cc291ecf..79a0d4da5ad5 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -257,9 +257,8 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { - u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT; + u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu)); - val &= ARMV8_PMU_PMCR_N_MASK; if (val == 0) return BIT(ARMV8_PMU_CYCLE_IDX); else @@ -1081,8 +1080,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) */ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { - u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) & - ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); - return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); + return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 06c868a12f14..a16157649813 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -908,7 +908,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) u64 pmcr, val; pmcr = kvm_vcpu_read_pmcr(vcpu); - val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { kvm_inject_undefined(vcpu); return false; @@ -1174,7 +1174,7 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val) { - u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val); struct kvm *kvm = vcpu->kvm; mutex_lock(&kvm->arch.config_lock); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index f2a821c05650..cadf127b9a1b 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -1111,8 +1112,7 @@ static void __armv8pmu_probe_pmu(void *info) probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ - cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) - & ARMV8_PMU_PMCR_N_MASK; + cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()); /* Add the CPU cycles counter */ cpu_pmu->num_events += 1; diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index e3899bd77f5c..28a21d13908a 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -215,8 +215,7 @@ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ #define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ -#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ -#define ARMV8_PMU_PMCR_N_MASK 0x1f +#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */ #define 
ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ /* -- Gitee From 59cf45d2af5604634b5433edd2c9f5d39a95e490 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:15 +0000 Subject: [PATCH 30/75] arm: perf: Use GENMASK for PMMIR fields ANBZ: #25252 commit 2f6a00f30600417ee2737f2b1229c75663f1e3c9 upstream. This is so that FIELD_GET and FIELD_PREP can be used and that the fields are in a consistent format to arm64/tools/sysreg Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-4-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 8 +++----- include/linux/perf/arm_pmuv3.h | 9 +++------ 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index cadf127b9a1b..1e1c43087fd1 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -332,7 +332,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; + u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir); return sysfs_emit(page, "0x%08x\n", slots); } @@ -344,8 +344,7 @@ static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT) - & ARMV8_PMU_BUS_SLOTS_MASK; + u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir); return sysfs_emit(page, "0x%08x\n", bus_slots); } @@ -357,8 +356,7 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT) - & ARMV8_PMU_BUS_WIDTH_MASK; + u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir); u32 val = 0; /* Encoded as Log2(number of bytes), plus one */ diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 28a21d13908a..5d71b947ab3d 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -247,12 +247,9 @@ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ /* PMMIR_EL1.SLOTS mask */ -#define ARMV8_PMU_SLOTS_MASK 0xff - -#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 -#define ARMV8_PMU_BUS_SLOTS_MASK 0xff -#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 -#define ARMV8_PMU_BUS_WIDTH_MASK 0xf +#define ARMV8_PMU_SLOTS GENMASK(7, 0) +#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8) +#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16) /* * This code is really good -- Gitee From a1890153add455d784e1cebd27333395c4abc37f Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:16 +0000 Subject: [PATCH 31/75] arm: perf: Convert remaining fields to use GENMASK ANBZ: #25252 commit d30f09b6d7de5d159dbb537f9d67dceb67409420 upstream. Convert the remaining fields to use either GENMASK or be built from other fields. These all already started at bit 0 so don't need a code change for the lack of _SHIFT. 
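A standalone sketch of the equivalence for the PMOVSR conversion (the
BIT32()/GENMASK32() macros below are userspace stand-ins for the kernel
helpers, not part of the change):

  #include <assert.h>

  /* Userspace stand-ins for the kernel's BIT()/GENMASK() helpers */
  #define BIT32(n)	(1u << (n))
  #define GENMASK32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

  #define OVSR_P		GENMASK32(30, 0)	/* event counters */
  #define OVSR_C		BIT32(31)		/* cycle counter */
  #define OVERFLOWED_MASK	(OVSR_P | OVSR_C)

  int main(void)
  {
  	/* The mask composed from named fields equals the old literal */
  	assert(OVERFLOWED_MASK == 0xffffffffu);
  	return 0;
  }
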
Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-5-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 2 +- include/linux/perf/arm_pmuv3.h | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 1e1c43087fd1..d31535241322 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -675,7 +675,7 @@ static u32 armv8pmu_getreset_flags(void) value = read_pmovsclr(); /* Write to clear flags */ - value &= ARMV8_PMU_OVSR_MASK; + value &= ARMV8_PMU_OVERFLOWED_MASK; write_pmovsclr(value); return value; diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 5d71b947ab3d..1c213099f2c1 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -216,19 +216,25 @@ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ #define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ #define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */ -#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ +/* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \ + ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \ + ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \ + ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP) /* * PMOVSR: counters overflow flag status reg */ -#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ -#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK +#define ARMV8_PMU_OVSR_P GENMASK(30, 0) +#define ARMV8_PMU_OVSR_C BIT(31) +/* Mask for writable bits is both P and C fields */ +#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C) /* * PMXEVTYPER: Event selection reg */ #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ -#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */ /* * Event filters for PMUv3 @@ -240,11 +246,13 @@ /* * PMUSERENR: user enable reg */ -#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +/* Mask for writable bits */ +#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \ + ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER) /* PMMIR_EL1.SLOTS mask */ #define ARMV8_PMU_SLOTS GENMASK(7, 0) -- Gitee From d4b3146b3be5441db6564239ded5bcb8bb9f76d5 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:17 +0000 Subject: [PATCH 32/75] arm64: perf: Include threshold control fields in PMEVTYPER mask ANBZ: #25252 commit 3115ee021bfb04efde2e96507bfcc1330261a6a1 upstream. FEAT_PMUv3_TH (Armv8.8) adds two new fields to PMEVTYPER, so include them in the mask. These aren't writable on 32 bit kernels as they are in the high part of the register, so only include them for arm64. It would be difficult to do this statically in the asm header files for each platform without resulting in circular includes or #ifdefs inline in the code. For that reason the ARMV8_PMU_EVTYPE_MASK definition has been removed and the mask is constructed programmatically. 
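As an illustrative compile-time check (not part of this patch), the programmatically built arm32 portion of the mask reproduces the old constant exactly:

	/* BIT(31) | BIT(30) | BIT(27) | GENMASK(15, 0) == 0xc800ffff */
	static_assert((ARMV8_PMU_EVTYPE_EVENT | ARMV8_PMU_INCLUDE_EL2 |
		       ARMV8_PMU_EXCLUDE_EL0 | ARMV8_PMU_EXCLUDE_EL1) ==
		      0xc800ffff);

On arm64 the mask additionally gains ARMV8_PMU_EVTYPE_TH (bits [43:32]) and ARMV8_PMU_EVTYPE_TC (bits [63:61]), which a single 32-bit constant could never express.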
Reviewed-by: Suzuki K Poulose Reviewed-by: Anshuman Khandual Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-6-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 9 ++++++++- include/linux/perf/arm_pmuv3.h | 3 ++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index d31535241322..50a947b043c2 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -558,8 +558,15 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) static void armv8pmu_write_evtype(int idx, u32 val) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); + unsigned long mask = ARMV8_PMU_EVTYPE_EVENT | + ARMV8_PMU_INCLUDE_EL2 | + ARMV8_PMU_EXCLUDE_EL0 | + ARMV8_PMU_EXCLUDE_EL1; - val &= ARMV8_PMU_EVTYPE_MASK; + if (IS_ENABLED(CONFIG_ARM64)) + mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH; + + val &= mask; write_pmevtypern(counter, val); } diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 1c213099f2c1..70bce300e176 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -233,8 +233,9 @@ /* * PMXEVTYPER: Event selection reg */ -#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ #define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_TH GENMASK(43, 32) +#define ARMV8_PMU_EVTYPE_TC GENMASK(63, 61) /* * Event filters for PMUv3 -- Gitee From fe7a67f8ea30df64db290eca47e53485982d0413 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:18 +0000 Subject: [PATCH 33/75] arm: pmu: Share user ABI format mechanism with SPE ANBZ: #25252 commit f6da86969a3c284466ab6080764b2ed91689f262 upstream. This mechanism makes it much easier to define and read new attributes so move it to the arm_pmu.h header so that it can be shared. At the same time update the existing format attributes to use it. GENMASK has to be changed to GENMASK_ULL because the config fields are 64 bits even on arm32 where this will also be used now. 
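To illustrate the mechanism with a made-up field name "foo" occupying config1 bits 3-4 (hypothetical, not a field any driver defines), a driver writes:

	#define ATTR_CFG_FLD_foo_CFG	config1
	#define ATTR_CFG_FLD_foo_LO	3
	#define ATTR_CFG_FLD_foo_HI	4

	GEN_PMU_FORMAT_ATTR(foo);

The attribute expands to PMU_FORMAT_ATTR(foo, "config1:3-4"), and ATTR_CFG_GET_FLD(&event->attr, foo) expands to ((event->attr.config1 >> 3) & GENMASK_ULL(1, 0)). A plain GENMASK here would be computed in unsigned long, which is 32 bits on arm32 and too narrow for fields that can span up to bit 63 of a config word.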
Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-7-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 21 ++++++++++++++++----- drivers/perf/arm_spe_pmu.c | 22 ---------------------- include/linux/perf/arm_pmu.h | 22 ++++++++++++++++++++++ 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 50a947b043c2..a21d23285650 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -299,20 +299,31 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = { .is_visible = armv8pmu_event_attr_is_visible, }; -PMU_FORMAT_ATTR(event, "config:0-15"); -PMU_FORMAT_ATTR(long, "config1:0"); -PMU_FORMAT_ATTR(rdpmc, "config1:1"); +/* User ABI */ +#define ATTR_CFG_FLD_event_CFG config +#define ATTR_CFG_FLD_event_LO 0 +#define ATTR_CFG_FLD_event_HI 15 +#define ATTR_CFG_FLD_long_CFG config1 +#define ATTR_CFG_FLD_long_LO 0 +#define ATTR_CFG_FLD_long_HI 0 +#define ATTR_CFG_FLD_rdpmc_CFG config1 +#define ATTR_CFG_FLD_rdpmc_LO 1 +#define ATTR_CFG_FLD_rdpmc_HI 1 + +GEN_PMU_FORMAT_ATTR(event); +GEN_PMU_FORMAT_ATTR(long); +GEN_PMU_FORMAT_ATTR(rdpmc); static int sysctl_perf_user_access __read_mostly; static bool armv8pmu_event_is_64bit(struct perf_event *event) { - return event->attr.config1 & 0x1; + return ATTR_CFG_GET_FLD(&event->attr, long); } static bool armv8pmu_event_want_user_access(struct perf_event *event) { - return event->attr.config1 & 0x2; + return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } static struct attribute *armv8_pmuv3_format_attrs[] = { diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 2bec2e3af0bd..71835682046e 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -206,28 +206,6 @@ static const struct attribute_group arm_spe_pmu_cap_group = { #define ATTR_CFG_FLD_inv_event_filter_LO 0 #define ATTR_CFG_FLD_inv_event_filter_HI 63 -/* Why does everything I do descend into this? */ -#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ - (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi - -#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ - __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) - -#define GEN_PMU_FORMAT_ATTR(name) \ - PMU_FORMAT_ATTR(name, \ - _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ - ATTR_CFG_FLD_##name##_LO, \ - ATTR_CFG_FLD_##name##_HI)) - -#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ - ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0)) - -#define ATTR_CFG_GET_FLD(attr, name) \ - _ATTR_CFG_GET_FLD(attr, \ - ATTR_CFG_FLD_##name##_CFG, \ - ATTR_CFG_FLD_##name##_LO, \ - ATTR_CFG_FLD_##name##_HI) - GEN_PMU_FORMAT_ATTR(ts_enable); GEN_PMU_FORMAT_ATTR(pa_enable); GEN_PMU_FORMAT_ATTR(pct_enable); diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index e4471a9fd63b..e382c9332b4b 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -189,4 +189,26 @@ void armpmu_free_irq(int irq, int cpu); #define ARMV8_SPE_PDEV_NAME "arm,spe-v1" #define ARMV8_TRBE_PDEV_NAME "arm,trbe" +/* Why does everything I do descend into this? */ +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + (lo) == (hi) ? 
#cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi + +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) + +#define GEN_PMU_FORMAT_ATTR(name) \ + PMU_FORMAT_ATTR(name, \ + _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI)) + +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ + ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0)) + +#define ATTR_CFG_GET_FLD(attr, name) \ + _ATTR_CFG_GET_FLD(attr, \ + ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI) + #endif /* __ARM_PMU_H__ */ -- Gitee From 6fb944629674fa5723dfb68edfe63a654d553684 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:20 +0000 Subject: [PATCH 34/75] KVM: selftests: aarch64: Update tools copy of arm_pmuv3.h ANBZ: #25252 commit c7b98bf0fc79bd2d91f6ef84e07b5f648d43c13e upstream. Now that ARMV8_PMU_PMCR_N is made with GENMASK, update usages to treat it as a pre-shifted mask. Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-9-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- tools/include/perf/arm_pmuv3.h | 43 +++++++++++-------- .../kvm/aarch64/vpmu_counter_access.c | 5 +-- 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/tools/include/perf/arm_pmuv3.h b/tools/include/perf/arm_pmuv3.h index e822d49fb5b8..1e397d55384e 100644 --- a/tools/include/perf/arm_pmuv3.h +++ b/tools/include/perf/arm_pmuv3.h @@ -218,45 +218,54 @@ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ #define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ -#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ -#define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */ +/* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \ + ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \ + ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \ + ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP) /* * PMOVSR: counters overflow flag status reg */ -#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ -#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK +#define ARMV8_PMU_OVSR_P GENMASK(30, 0) +#define ARMV8_PMU_OVSR_C BIT(31) +/* Mask for writable bits is both P and C fields */ +#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C) /* * PMXEVTYPER: Event selection reg */ -#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ -#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_TH GENMASK(43, 32) +#define ARMV8_PMU_EVTYPE_TC GENMASK(63, 61) /* * Event filters for PMUv3 */ -#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) -#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) -#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29) +#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26) /* * PMUSERENR: user enable reg */ -#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be 
written at EL0 */ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +/* Mask for writable bits */ +#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \ + ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER) /* PMMIR_EL1.SLOTS mask */ -#define ARMV8_PMU_SLOTS_MASK 0xff - -#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 -#define ARMV8_PMU_BUS_SLOTS_MASK 0xff -#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 -#define ARMV8_PMU_BUS_WIDTH_MASK 0xf +#define ARMV8_PMU_SLOTS GENMASK(7, 0) +#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8) +#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16) +#define ARMV8_PMU_THWIDTH GENMASK(23, 20) /* * This code is really good diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 5ea78986e665..9d51b5691349 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -42,13 +42,12 @@ struct pmreg_sets { static uint64_t get_pmcr_n(uint64_t pmcr) { - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); } static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) { - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); + u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N); } static uint64_t get_counters_mask(uint64_t n) -- Gitee From 535a8ef6cf7f744e55446e11f69c6ecb53210068 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:21 +0000 Subject: [PATCH 35/75] arm: pmu: Move error message and -EOPNOTSUPP to individual PMUs ANBZ: #25252 commit 186c91aaf54989a9c74869dcc6ba031313d8e2b8 upstream. -EPERM or -EINVAL always get converted to -EOPNOTSUPP, so replace them. This will allow __hw_perf_event_init() to return a different code or not print that particular message for a different error in the next commit. 
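A minimal sketch of a driver hook under the new convention (example_pmu_* is a placeholder name, not a real driver): the callback now owns both the diagnostic and the errno, and __hw_perf_event_init() propagates the return value untouched:

	static int example_pmu_set_event_filter(struct hw_perf_event *event,
						struct perf_event_attr *attr)
	{
		if (attr->exclude_idle) {
			pr_debug("ARM performance counters do not support mode exclusion\n");
			return -EOPNOTSUPP;
		}
		return 0;
	}

Previously any non-zero return was rewritten to -EOPNOTSUPP in arm_pmu.c, so a driver could not surface a distinct error code.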
Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-10-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm/kernel/perf_event_v7.c | 6 ++++-- drivers/perf/apple_m1_cpu_pmu.c | 6 ++++-- drivers/perf/arm_pmu.c | 11 +++++------ drivers/perf/arm_pmuv3.c | 6 ++++-- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index eb2190477da1..4e115076d323 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1072,8 +1072,10 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, { unsigned long config_base = 0; - if (attr->exclude_idle) - return -EPERM; + if (attr->exclude_idle) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } if (attr->exclude_user) config_base |= ARMV7_EXCLUDE_USER; if (attr->exclude_kernel) diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index cd2de44b61b9..f322e5ca1114 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -524,8 +524,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event, { unsigned long config_base = 0; - if (!attr->exclude_guest) - return -EINVAL; + if (!attr->exclude_guest) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } if (!attr->exclude_kernel) config_base |= M1_PMU_CFG_COUNT_KERNEL; if (!attr->exclude_user) diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index b3bb4d62f13d..30d67e670603 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -443,7 +443,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping; + int mapping, ret; hwc->flags = 0; mapping = armpmu->map_event(event); @@ -468,11 +468,10 @@ __hw_perf_event_init(struct perf_event *event) /* * Check whether we need to exclude the counter from certain modes. */ - if (armpmu->set_event_filter && - armpmu->set_event_filter(hwc, &event->attr)) { - pr_debug("ARM performance counters do not support " - "mode exclusion\n"); - return -EOPNOTSUPP; + if (armpmu->set_event_filter) { + ret = armpmu->set_event_filter(hwc, &event->attr); + if (ret) + return ret; } /* diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index a21d23285650..c851a6822ccb 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -936,8 +936,10 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, { unsigned long config_base = 0; - if (attr->exclude_idle) - return -EPERM; + if (attr->exclude_idle) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } /* * If we're running in hyp mode, then we *are* the hypervisor. -- Gitee From 2b5de14c82722990332c60e971462a414ec431a3 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:22 +0000 Subject: [PATCH 36/75] arm64: perf: Add support for event counting threshold ANBZ: #25252 commit 816c26754447e8b28d6c604e1f5b1d205b2586ee upstream. FEAT_PMUv3_TH (Armv8.8) permits a PMU counter to increment only on events whose count meets a specified threshold condition. 
For example if PMEVTYPERn.TC (Threshold Control) is set to 0b101 (Greater than or equal, count), and the threshold is set to 2, then the PMU counter will now only increment by 1 when an event would have previously incremented the PMU counter by 2 or more on a single processor cycle. Three new Perf event config fields, 'threshold', 'threshold_compare' and 'threshold_count' have been added to control the feature. threshold_compare maps to the upper two bits of PMEVTYPERn.TC and threshold_count maps to the first bit of TC. These separate attributes have been picked rather than enumerating all the possible combinations of the TC field as in the Arm ARM. The attributes would be used on a Perf command line like this: $ perf stat -e stall_slot/threshold=2,threshold_compare=2/ A new capability for reading out the maximum supported threshold value has also been added: $ cat /sys/bus/event_source/devices/armv8_pmuv3/caps/threshold_max 0x000000ff If a threshold higher than threshold_max is provided, then an error is generated. If FEAT_PMUv3_TH isn't implemented or a 32 bit kernel is running, then threshold_max reads zero, and attempting to set a threshold value will also result in an error. The threshold is per PMU counter, and there are potentially different threshold_max values per PMU type on heterogeneous systems. Bits higher than 32 now need to be written into PMEVTYPER, so armv8pmu_write_evtype() has to be updated to take an unsigned long value rather than u32 which gives the correct behavior on both aarch32 and 64. Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-11-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 79 +++++++++++++++++++++++++++++++++- include/linux/perf/arm_pmuv3.h | 1 + 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index c851a6822ccb..cd0c329253f2 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -309,10 +309,22 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = { #define ATTR_CFG_FLD_rdpmc_CFG config1 #define ATTR_CFG_FLD_rdpmc_LO 1 #define ATTR_CFG_FLD_rdpmc_HI 1 +#define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */ +#define ATTR_CFG_FLD_threshold_count_LO 2 +#define ATTR_CFG_FLD_threshold_count_HI 2 +#define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */ +#define ATTR_CFG_FLD_threshold_compare_LO 3 +#define ATTR_CFG_FLD_threshold_compare_HI 4 +#define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */ +#define ATTR_CFG_FLD_threshold_LO 5 +#define ATTR_CFG_FLD_threshold_HI 16 GEN_PMU_FORMAT_ATTR(event); GEN_PMU_FORMAT_ATTR(long); GEN_PMU_FORMAT_ATTR(rdpmc); +GEN_PMU_FORMAT_ATTR(threshold_count); +GEN_PMU_FORMAT_ATTR(threshold_compare); +GEN_PMU_FORMAT_ATTR(threshold); static int sysctl_perf_user_access __read_mostly; @@ -326,10 +338,27 @@ static bool armv8pmu_event_want_user_access(struct perf_event *event) return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } +static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr) +{ + u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare); + u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count); + + /* + * The count bit is always the bottom bit of the full control field, and + * the comparison is the upper two bits, but it's not explicitly + * labelled in the Arm ARM. For the Perf interface we split it into two + * fields, so reconstruct it here. 
+ */ + return (th_compare << 1) | th_count; +} + static struct attribute *armv8_pmuv3_format_attrs[] = { &format_attr_event.attr, &format_attr_long.attr, &format_attr_rdpmc.attr, + &format_attr_threshold.attr, + &format_attr_threshold_compare.attr, + &format_attr_threshold_count.attr, NULL, }; @@ -379,10 +408,38 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(bus_width); +static u32 threshold_max(struct arm_pmu *cpu_pmu) +{ + /* + * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be + * impossible to write the threshold in the upper 32 bits of PMEVTYPER. + */ + if (IS_ENABLED(CONFIG_ARM)) + return 0; + + /* + * The largest value that can be written to PMEVTYPER_EL0.TH is + * (2 ^ PMMIR.THWIDTH) - 1. + */ + return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1; +} + +static ssize_t threshold_max_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + + return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu)); +} + +static DEVICE_ATTR_RO(threshold_max); + static struct attribute *armv8_pmuv3_caps_attrs[] = { &dev_attr_slots.attr, &dev_attr_bus_slots.attr, &dev_attr_bus_width.attr, + &dev_attr_threshold_max.attr, NULL, }; @@ -566,7 +623,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) armv8pmu_write_hw_counter(event, value); } -static void armv8pmu_write_evtype(int idx, u32 val) +static void armv8pmu_write_evtype(int idx, unsigned long val) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); unsigned long mask = ARMV8_PMU_EVTYPE_EVENT | @@ -935,6 +992,10 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) { unsigned long config_base = 0; + struct perf_event *perf_event = container_of(attr, struct perf_event, + attr); + struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); + u32 th; if (attr->exclude_idle) { pr_debug("ARM performance counters do not support mode exclusion\n"); @@ -968,6 +1029,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, if (attr->exclude_user) config_base |= ARMV8_PMU_EXCLUDE_EL0; + /* + * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will + * be 0 and will also trigger this check, preventing it from being used. + */ + th = ATTR_CFG_GET_FLD(attr, threshold); + if (th > threshold_max(cpu_pmu)) { + pr_debug("PMU event threshold exceeds max value\n"); + return -EINVAL; + } + + if (IS_ENABLED(CONFIG_ARM64) && th) { + config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th); + config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC, + armv8pmu_event_threshold_control(attr)); + } + /* * Install the filter into config_base as this is used to * construct the event type. diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 70bce300e176..96274cc4fd60 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -259,6 +259,7 @@ #define ARMV8_PMU_SLOTS GENMASK(7, 0) #define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8) #define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16) +#define ARMV8_PMU_THWIDTH GENMASK(23, 20) /* * This code is really good -- Gitee From 4d056062072d8f55db91cee8b6881289b5a657aa Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 11 Dec 2023 16:13:23 +0000 Subject: [PATCH 37/75] Documentation: arm64: Document the PMU event counting threshold feature ANBZ: #25252 commit bd690638e2c27dcea1d56376aa4bf3995d82ccfc upstream. 
Add documentation for the new Perf event open parameters and the threshold_max capability file. Reviewed-by: Anshuman Khandual Reviewed-by: Suzuki K Poulose Acked-by: Namhyung Kim Signed-off-by: James Clark Link: https://lore.kernel.org/r/20231211161331.1277825-12-james.clark@arm.com Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- Documentation/arch/arm64/perf.rst | 72 +++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/Documentation/arch/arm64/perf.rst b/Documentation/arch/arm64/perf.rst index 1f87b57c2332..997fd716b82f 100644 --- a/Documentation/arch/arm64/perf.rst +++ b/Documentation/arch/arm64/perf.rst @@ -164,3 +164,75 @@ and should be used to mask the upper bits as needed. https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/perf/arch/arm64/tests/user-events.c .. _tools/lib/perf/tests/test-evsel.c: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/perf/tests/test-evsel.c + +Event Counting Threshold +========================================== + +Overview +-------- + +FEAT_PMUv3_TH (Armv8.8) permits a PMU counter to increment only on +events whose count meets a specified threshold condition. For example if +threshold_compare is set to 2 ('Greater than or equal'), and the +threshold is set to 2, then the PMU counter will now only increment +when an event would have previously incremented the PMU counter by 2 or +more on a single processor cycle. + +To increment by 1 after passing the threshold condition instead of the +number of events on that cycle, add the 'threshold_count' option to the +commandline. + +How-to +------ + +These are the parameters for controlling the feature: + +.. list-table:: + :header-rows: 1 + + * - Parameter + - Description + * - threshold + - Value to threshold the event by. A value of 0 means that + thresholding is disabled and the other parameters have no effect. + * - threshold_compare + - | Comparison function to use, with the following values supported: + | + | 0: Not-equal + | 1: Equals + | 2: Greater-than-or-equal + | 3: Less-than + * - threshold_count + - If this is set, count by 1 after passing the threshold condition + instead of the value of the event on this cycle. + +The threshold, threshold_compare and threshold_count values can be +provided per event, for example: + +.. code-block:: sh + + perf stat -e stall_slot/threshold=2,threshold_compare=2/ \ + -e dtlb_walk/threshold=10,threshold_compare=3,threshold_count/ + +In this example the stall_slot event will count by 2 or more on every +cycle where 2 or more stalls happen. And dtlb_walk will count by 1 on +every cycle where the number of dtlb walks was less than 10. + +The maximum supported threshold value can be read from the caps of each +PMU, for example: + +.. code-block:: sh + + cat /sys/bus/event_source/devices/armv8_pmuv3/caps/threshold_max + + 0x000000ff + +If a value higher than this is given, then opening the event will result +in an error. The highest possible maximum is 4095, as the config field +for threshold is limited to 12 bits, and the Perf tool will refuse to +parse higher values. + +If the PMU doesn't support FEAT_PMUv3_TH, then threshold_max will read +0, and attempting to set a threshold value will also result in an error.
threshold_max will also read as 0 on aarch32 guests, even if the host +is running on hardware with the feature.
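Under the hood, the driver recombines the two comparison parameters into the hardware control field PMEVTYPER.TC: threshold_count maps to TC[0] and threshold_compare to TC[2:1]. For example, threshold_compare=2 with threshold_count set encodes TC = (2 << 1) | 1 = 0b101, 'Greater than or equal, count'.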
-- Gitee From b3a3a61c5cd3c5bc13acc1fa5b0645a47045394f Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 26 Jun 2024 16:32:25 -0600 Subject: [PATCH 38/75] perf: arm_pmuv3: Avoid assigning fixed cycle counter with threshold ANBZ: #25252 commit 81e15ca3e523a508d62806fe681c1d289361ca16 upstream. If the user has requested a counting threshold for the CPU cycles event, then the fixed cycle counter can't be assigned as it lacks threshold support. Currently, the thresholds will work or not randomly depending on which counter the event is assigned. While using thresholds for CPU cycles doesn't make much sense, it can be useful for testing purposes. Fixes: 816c26754447 ("arm64: perf: Add support for event counting threshold") Signed-off-by: Rob Herring (Arm) Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240626-arm-pmu-3-9-icntr-v2-1-c9784b4f4065@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index cd0c329253f2..4f1832b73bca 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -338,6 +338,11 @@ static bool armv8pmu_event_want_user_access(struct perf_event *event) return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } +static u32 armv8pmu_event_get_threshold(struct perf_event_attr *attr) +{ + return ATTR_CFG_GET_FLD(attr, threshold); +} + static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr) { u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare); @@ -941,7 +946,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; /* Always prefer to place a cycle counter into the cycle counter. */ - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && + !armv8pmu_event_get_threshold(&event->attr)) { if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return ARMV8_IDX_CYCLE_COUNTER; else if (armv8pmu_event_is_64bit(event) && @@ -1033,7 +1039,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will * be 0 and will also trigger this check, preventing it from being used. */ - th = ATTR_CFG_GET_FLD(attr, threshold); + th = armv8pmu_event_get_threshold(attr); if (th > threshold_max(cpu_pmu)) { pr_debug("PMU event threshold exceeds max value\n"); return -EINVAL; -- Gitee From 180d1edba5d305587f720c33184d8a1f109c020e Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 26 Jun 2024 16:32:26 -0600 Subject: [PATCH 39/75] perf: arm_pmuv3: Drop unnecessary IS_ENABLED(CONFIG_ARM64) check ANBZ: #25252 commit 598c1a2d9f4ba8a04cf92af8e988052179e31199 upstream. The IS_ENABLED(CONFIG_ARM64) check for threshold support is unnecessary. The purpose is to not enable thresholds on arm32, but if threshold is non-zero, the check against threshold_max() just above here will have errored out because threshold_max() is always 0 on arm32. 
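Spelled out against the resulting code (sketch of the existing path, with comments added for illustration only):

	th = armv8pmu_event_get_threshold(attr);
	if (th > threshold_max(cpu_pmu)) {	/* on arm32 threshold_max() == 0 */
		pr_debug("PMU event threshold exceeds max value\n");
		return -EINVAL;
	}

	if (th) {	/* therefore only reachable with th != 0 on arm64 */
		config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
		config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
					  armv8pmu_event_threshold_control(attr));
	}

On arm32 the only value of th that survives the first check is 0, so the IS_ENABLED(CONFIG_ARM64) guard on the second block was dead code.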
Signed-off-by: Rob Herring (Arm) Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20240626-arm-pmu-3-9-icntr-v2-2-c9784b4f4065@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 4f1832b73bca..cb714d94d177 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -1045,7 +1045,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, return -EINVAL; } - if (IS_ENABLED(CONFIG_ARM64) && th) { + if (th) { config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th); config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC, armv8pmu_event_threshold_control(attr)); -- Gitee From 5fccf4f5a77134edb1650e10c723ccf4ae4c66c9 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 26 Jun 2024 16:32:27 -0600 Subject: [PATCH 40/75] perf/arm: Move 32-bit PMU drivers to drivers/perf/ ANBZ: #25252 commit 8d75537bebfa14fcd493b1f840b07a8cff5a640d upstream. It is preferred to put drivers under drivers/ rather than under arch/. The PMU drivers also depend on arm_pmu.c, so it's better to place them all together. Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Link: https://lore.kernel.org/r/20240626-arm-pmu-3-9-icntr-v2-3-c9784b4f4065@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm/kernel/Makefile | 2 -- drivers/perf/Kconfig | 12 ++++++++++++ drivers/perf/Makefile | 3 +++ .../perf_event_v6.c => drivers/perf/arm_v6_pmu.c | 3 --- .../perf_event_v7.c => drivers/perf/arm_v7_pmu.c | 3 --- .../perf/arm_xscale_pmu.c | 3 --- 6 files changed, 15 insertions(+), 11 deletions(-) rename arch/arm/kernel/perf_event_v6.c => drivers/perf/arm_v6_pmu.c (99%) rename arch/arm/kernel/perf_event_v7.c => drivers/perf/arm_v7_pmu.c (99%) rename arch/arm/kernel/perf_event_xscale.c => drivers/perf/arm_xscale_pmu.c (99%) diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index ae2f2b2b4e5a..94a85cf1260d 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -77,8 +77,6 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ - perf_event_v7.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o obj-$(CONFIG_VDSO) += vdso.o diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index ec6e0d9194a1..8ded2ca9fbc6 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -56,6 +56,18 @@ config ARM_PMU Say y if you want to use CPU performance monitors on ARM-based systems.
+config ARM_V6_PMU + depends on ARM_PMU && (CPU_V6 || CPU_V6K) + def_bool y + +config ARM_V7_PMU + depends on ARM_PMU && CPU_V7 + def_bool y + +config ARM_XSCALE_PMU + depends on ARM_PMU && CPU_XSCALE + def_bool y + config RISCV_PMU depends on RISCV bool "RISC-V PMU framework" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index a06338e3401c..16611eb02a8a 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -6,6 +6,9 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o +obj-$(CONFIG_ARM_V6_PMU) += arm_v6_pmu.o +obj-$(CONFIG_ARM_V7_PMU) += arm_v7_pmu.o +obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o diff --git a/arch/arm/kernel/perf_event_v6.c b/drivers/perf/arm_v6_pmu.c similarity index 99% rename from arch/arm/kernel/perf_event_v6.c rename to drivers/perf/arm_v6_pmu.c index 1ae99deeec54..9bc4340c784e 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/drivers/perf/arm_v6_pmu.c @@ -31,8 +31,6 @@ * enable the interrupt. */ -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) - #include #include @@ -587,4 +585,3 @@ static struct platform_driver armv6_pmu_driver = { }; builtin_platform_driver(armv6_pmu_driver); -#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ diff --git a/arch/arm/kernel/perf_event_v7.c b/drivers/perf/arm_v7_pmu.c similarity index 99% rename from arch/arm/kernel/perf_event_v7.c rename to drivers/perf/arm_v7_pmu.c index 4e115076d323..1c1c06eb5023 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/drivers/perf/arm_v7_pmu.c @@ -17,8 +17,6 @@ * counter and all 4 performance counters together can be reset separately. */ -#ifdef CONFIG_CPU_V7 - #include #include #include @@ -2046,4 +2044,3 @@ static struct platform_driver armv7_pmu_driver = { }; builtin_platform_driver(armv7_pmu_driver); -#endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/perf_event_xscale.c b/drivers/perf/arm_xscale_pmu.c similarity index 99% rename from arch/arm/kernel/perf_event_xscale.c rename to drivers/perf/arm_xscale_pmu.c index f6cdcacfb96d..9ac30d51dd6f 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/drivers/perf/arm_xscale_pmu.c @@ -13,8 +13,6 @@ * PMU structures. */ -#ifdef CONFIG_CPU_XSCALE - #include #include @@ -773,4 +771,3 @@ static struct platform_driver xscale_pmu_driver = { }; builtin_platform_driver(xscale_pmu_driver); -#endif /* CONFIG_CPU_XSCALE */ -- Gitee From 0289fe91098c74d33ec893cdda0f7c119557d64c Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 26 Jun 2024 16:32:28 -0600 Subject: [PATCH 41/75] perf: arm_v6/7_pmu: Drop non-DT probe support ANBZ: #25252 commit 12f051c987dc91d3bf7c4dbb740321f8b24070b4 upstream. There are no non-DT based PMU users for v6 or v7, so drop the custom non-DT probe table. Unfortunately XScale still needs non-DT probing. Note that this drops support for arm1156 PMU, but there are no arm1156 based systems supported in the kernel. 
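For illustration, this is the shape probing takes afterwards (a condensed sketch; the real table carries additional compatibles): everything flows through the DT match table passed to arm_pmu_device_probe(), with the MIDR-based pmu_probe_info fallback gone:

	static const struct of_device_id armv6_pmu_of_device_ids[] = {
		{ .compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init },
		{ .compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init },
		{ /* sentinel value */ }
	};

	static int armv6_pmu_device_probe(struct platform_device *pdev)
	{
		return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, NULL);
	}

XScale, by contrast, keeps its pmu_probe_info table since those platforms do not use DT.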
Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Link: https://lore.kernel.org/r/20240626-arm-pmu-3-9-icntr-v2-4-c9784b4f4065@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_v6_pmu.c | 18 +----------------- drivers/perf/arm_v7_pmu.c | 10 +--------- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c index 9bc4340c784e..72472e574f6b 100644 --- a/drivers/perf/arm_v6_pmu.c +++ b/drivers/perf/arm_v6_pmu.c @@ -509,13 +509,6 @@ static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu) -{ - armv6pmu_init(cpu_pmu); - cpu_pmu->name = "armv6_1156"; - return 0; -} - static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu) { armv6pmu_init(cpu_pmu); @@ -562,18 +555,9 @@ static const struct of_device_id armv6_pmu_of_device_ids[] = { { /* sentinel value */ } }; -static const struct pmu_probe_info armv6_pmu_probe_table[] = { - ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init), - ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init), - ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init), - ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init), - { /* sentinel value */ } -}; - static int armv6_pmu_device_probe(struct platform_device *pdev) { - return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, - armv6_pmu_probe_table); + return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, NULL); } static struct platform_driver armv6_pmu_driver = { diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c index 1c1c06eb5023..a0992fc1fa0f 100644 --- a/drivers/perf/arm_v7_pmu.c +++ b/drivers/perf/arm_v7_pmu.c @@ -2021,17 +2021,9 @@ static const struct of_device_id armv7_pmu_of_device_ids[] = { {}, }; -static const struct pmu_probe_info armv7_pmu_probe_table[] = { - ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init), - ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init), - { /* sentinel value */ } -}; - - static int armv7_pmu_device_probe(struct platform_device *pdev) { - return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, - armv7_pmu_probe_table); + return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, NULL); } static struct platform_driver armv7_pmu_driver = { -- Gitee From 72f8206df63a70076b66f8c1460daaabadeee9ca Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 26 Jun 2024 16:32:29 -0600 Subject: [PATCH 42/75] perf: arm_pmuv3: Include asm/arm_pmuv3.h from linux/perf/arm_pmuv3.h ANBZ: #25252 commit d688ffa269421cec73c7e13fd0cb03879b07db89 upstream. The arm64 asm/arm_pmuv3.h depends on defines from linux/perf/arm_pmuv3.h. Rather than depend on include order, follow the usual pattern of "linux" headers including "asm" headers of the same name. With this change, the include of linux/kvm_host.h is problematic due to circular includes: In file included from ../arch/arm64/include/asm/arm_pmuv3.h:9, from ../include/linux/perf/arm_pmuv3.h:312, from ../include/kvm/arm_pmu.h:11, from ../arch/arm64/include/asm/kvm_host.h:38, from ../arch/arm64/mm/init.c:41: ../include/linux/kvm_host.h:383:30: error: field 'arch' has incomplete type Switching to asm/kvm_host.h solves the issue. 
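Reconstructed from the error output above, the include cycle looks like:

	arch/arm64/mm/init.c
	  -> asm/kvm_host.h
	    -> kvm/arm_pmu.h
	      -> linux/perf/arm_pmuv3.h
	        -> asm/arm_pmuv3.h
	          -> linux/kvm_host.h	/* field 'arch' has incomplete type */

Including asm/kvm_host.h from asm/arm_pmuv3.h instead stops the chain from re-entering linux/kvm_host.h before the arch-specific types are complete.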
Signed-off-by: Rob Herring (Arm) Link: https://lore.kernel.org/r/20240626-arm-pmu-3-9-icntr-v2-5-c9784b4f4065@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/include/asm/arm_pmuv3.h | 2 +- arch/arm64/kvm/pmu-emul.c | 1 - drivers/perf/arm_pmuv3.c | 2 -- include/linux/perf/arm_pmuv3.h | 2 ++ 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index c27404fa4418..a4697a0b6835 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -6,7 +6,7 @@ #ifndef __ASM_PMUV3_H #define __ASM_PMUV3_H -#include +#include #include #include diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 79a0d4da5ad5..08ebffbb6bd3 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -14,7 +14,6 @@ #include #include #include -#include #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index cb714d94d177..9eaa5b072744 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -25,8 +25,6 @@ #include #include -#include - /* ARMv8 Cortex-A53 specific event types. */ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 96274cc4fd60..f523916de014 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -306,4 +306,6 @@ } \ } while (0) +#include + #endif -- Gitee From edca8cc3a71069e58635c576df458fe79f736630 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:18 -0600 Subject: [PATCH 43/75] perf: arm_pmu: Remove event index to counter remapping ANBZ: #25252 commit bf5ffc8c80e0cf5205849cd0c9c3cb261d2beee6 upstream. Xscale and Armv6 PMUs defined the cycle counter at 0 and event counters starting at 1 and had 1:1 event index to counter numbering. On Armv7 and later, this changed the cycle counter to 31 and event counters start at 0. The drivers for Armv7 and PMUv3 kept the old event index numbering and introduced an event index to counter conversion. The conversion uses masking to convert from event index to a counter number. This operation relies on having at most 32 counters so that the cycle counter index 0 can be transformed to counter number 31. Armv9.4 adds support for an additional fixed function counter (instructions) which increases possible counters to more than 32, and the conversion won't work anymore as a simple subtract and mask. The primary reason for the translation (other than history) seems to be to have a contiguous mask of counters 0-N. Keeping that would result in more complicated index to counter conversions. Instead, store a mask of available counters rather than just number of events. That provides more information in addition to the number of events. No (intended) functional changes. 
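A before/after sketch of the resulting iteration pattern (condensed from the armv7pmu_reset() hunk below):

	/* Before: contiguous 0..num_events-1 indices, cycle counter remapped */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv7_pmnc_disable_counter(idx);

	/* After: counters sit at their architectural positions in an
	 * explicit mask, e.g. the cycle counter at bit 31 */
	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS)
		armv7_pmnc_disable_counter(idx);

Where a plain count is still wanted, bitmap_weight(cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) recovers the old num_events value.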
Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-1-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/kvm/pmu-emul.c | 6 +-- drivers/perf/apple_m1_cpu_pmu.c | 4 +- drivers/perf/arm_pmu.c | 11 ++--- drivers/perf/arm_pmuv3.c | 62 +++++++++----------------- drivers/perf/arm_v6_pmu.c | 6 ++- drivers/perf/arm_v7_pmu.c | 77 +++++++++++++-------------------- drivers/perf/arm_xscale_pmu.c | 12 +++-- include/linux/perf/arm_pmu.h | 2 +- include/linux/perf/arm_pmuv3.h | 1 + 9 files changed, 75 insertions(+), 106 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 08ebffbb6bd3..7cd5df4abe8f 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -865,10 +865,10 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) struct arm_pmu *arm_pmu = kvm->arch.arm_pmu; /* - * The arm_pmu->num_events considers the cycle counter as well. - * Ignore that and return only the general-purpose counters. + * The arm_pmu->cntr_mask considers the fixed counter(s) as well. + * Ignore those and return only the general-purpose counters. */ - return arm_pmu->num_events - 1; + return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS); } static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index f322e5ca1114..c8f607912567 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -400,7 +400,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; idx++) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct perf_sample_data data; @@ -560,7 +560,7 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags) cpu_pmu->reset = m1_pmu_reset; cpu_pmu->set_event_filter = m1_pmu_set_event_filter; - cpu_pmu->num_events = M1_PMU_NR_COUNTERS; + bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS); cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group; return 0; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 30d67e670603..9f0d32136433 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -520,7 +520,7 @@ static void armpmu_enable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); + bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS); /* For task-bound events we may be called on other CPUs */ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) @@ -740,7 +740,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) struct perf_event *event; int idx; - for (idx = 0; idx < armpmu->num_events; idx++) { + for_each_set_bit(idx, armpmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { event = hw_events->events[idx]; if (!event) continue; @@ -770,7 +770,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, { struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); + bool enabled = 
!bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS); if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) return NOTIFY_DONE; @@ -923,8 +923,9 @@ int armpmu_register(struct arm_pmu *pmu) if (ret) goto out_destroy; - pr_info("enabled with %s PMU driver, %d counters available%s\n", - pmu->name, pmu->num_events, + pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n", + pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS), + ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask, has_nmi ? ", using NMIs" : ""); kvm_host_pmu_init(pmu); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 9eaa5b072744..3d24cfec007c 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -454,9 +454,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { /* * Perf Events' indices */ -#define ARMV8_IDX_CYCLE_COUNTER 0 -#define ARMV8_IDX_COUNTER0 1 -#define ARMV8_IDX_CYCLE_COUNTER_USER 32 +#define ARMV8_IDX_CYCLE_COUNTER 31 /* * We unconditionally enable ARMv8.5-PMU long event counter support @@ -489,19 +487,12 @@ static bool armv8pmu_event_is_chained(struct perf_event *event) return !armv8pmu_event_has_user_read(event) && armv8pmu_event_is_64bit(event) && !armv8pmu_has_long_event(cpu_pmu) && - (idx != ARMV8_IDX_CYCLE_COUNTER); + (idx < ARMV8_PMU_MAX_GENERAL_COUNTERS); } /* * ARMv8 low level PMU access */ - -/* - * Perf Event to low level counters mapping - */ -#define ARMV8_IDX_TO_COUNTER(x) \ - (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) - static u64 armv8pmu_pmcr_read(void) { return read_pmcr(); @@ -521,14 +512,12 @@ static int armv8pmu_has_overflowed(u32 pmovsr) static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { - return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); + return pmnc & BIT(idx); } static u64 armv8pmu_read_evcntr(int idx) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - - return read_pmevcntrn(counter); + return read_pmevcntrn(idx); } static u64 armv8pmu_read_hw_counter(struct perf_event *event) @@ -557,7 +546,7 @@ static bool armv8pmu_event_needs_bias(struct perf_event *event) return false; if (armv8pmu_has_long_event(cpu_pmu) || - idx == ARMV8_IDX_CYCLE_COUNTER) + idx >= ARMV8_PMU_MAX_GENERAL_COUNTERS) return true; return false; @@ -595,9 +584,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event) static void armv8pmu_write_evcntr(int idx, u64 value) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - - write_pmevcntrn(counter, value); + write_pmevcntrn(idx, value); } static void armv8pmu_write_hw_counter(struct perf_event *event, @@ -628,7 +615,6 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) static void armv8pmu_write_evtype(int idx, unsigned long val) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); unsigned long mask = ARMV8_PMU_EVTYPE_EVENT | ARMV8_PMU_INCLUDE_EL2 | ARMV8_PMU_EXCLUDE_EL0 | @@ -638,7 +624,7 @@ static void armv8pmu_write_evtype(int idx, unsigned long val) mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH; val &= mask; - write_pmevtypern(counter, val); + write_pmevtypern(idx, val); } static void armv8pmu_write_event_type(struct perf_event *event) @@ -667,7 +653,7 @@ static void armv8pmu_write_event_type(struct perf_event *event) static u32 armv8pmu_event_cnten_mask(struct perf_event *event) { - int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); + int counter = event->hw.idx; u32 mask = BIT(counter); if (armv8pmu_event_is_chained(event)) @@ -726,8 +712,7 @@ static void armv8pmu_enable_intens(u32 mask) static void armv8pmu_enable_event_irq(struct perf_event 
*event) { - u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); - armv8pmu_enable_intens(BIT(counter)); + armv8pmu_enable_intens(BIT(event->hw.idx)); } static void armv8pmu_disable_intens(u32 mask) @@ -741,8 +726,7 @@ static void armv8pmu_disable_intens(u32 mask) static void armv8pmu_disable_event_irq(struct perf_event *event) { - u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); - armv8pmu_disable_intens(BIT(counter)); + armv8pmu_disable_intens(BIT(event->hw.idx)); } static u32 armv8pmu_getreset_flags(void) @@ -786,7 +770,8 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); /* Clear any unused counters to avoid leaking their contents */ - for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { + for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask, + ARMPMU_MAX_HWEVENTS) { if (i == ARMV8_IDX_CYCLE_COUNTER) write_pmccntr(0); else @@ -869,7 +854,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) * to prevent skews in group events. */ armv8pmu_stop(cpu_pmu); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -908,7 +893,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, { int idx; - for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } @@ -924,7 +909,9 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc, * Chaining requires two consecutive event counters, where * the lower idx must be even. */ - for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) { + if (!(idx & 0x1)) + continue; if (!test_and_set_bit(idx, cpuc->used_mask)) { /* Check if the preceding even counter is available */ if (!test_and_set_bit(idx - 1, cpuc->used_mask)) @@ -978,15 +965,7 @@ static int armv8pmu_user_event_idx(struct perf_event *event) if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event)) return 0; - /* - * We remap the cycle counter index to 32 to - * match the offset applied to the rest of - * the counter indices. 
- */ - if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER) - return ARMV8_IDX_CYCLE_COUNTER_USER; - - return event->hw.idx; + return event->hw.idx + 1; } /* @@ -1211,10 +1190,11 @@ static void __armv8pmu_probe_pmu(void *info) probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ - cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()); + bitmap_set(cpu_pmu->cntr_mask, + 0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read())); /* Add the CPU cycles counter */ - cpu_pmu->num_events += 1; + set_bit(ARMV8_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask); pmceid[0] = pmceid_raw[0] = read_pmceid0(); pmceid[1] = pmceid_raw[1] = read_pmceid1(); diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c index 72472e574f6b..1bc48a4fad3b 100644 --- a/drivers/perf/arm_v6_pmu.c +++ b/drivers/perf/arm_v6_pmu.c @@ -64,6 +64,7 @@ enum armv6_counters { ARMV6_CYCLE_COUNTER = 0, ARMV6_COUNTER0, ARMV6_COUNTER1, + ARMV6_NUM_COUNTERS }; /* @@ -321,7 +322,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) */ armv6_pmcr_write(pmcr); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV6_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -499,7 +500,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = armv6pmu_start; cpu_pmu->stop = armv6pmu_stop; cpu_pmu->map_event = armv6_map_event; - cpu_pmu->num_events = 3; + + bitmap_set(cpu_pmu->cntr_mask, 0, ARMV6_NUM_COUNTERS); } static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c index a0992fc1fa0f..f53acd60b28e 100644 --- a/drivers/perf/arm_v7_pmu.c +++ b/drivers/perf/arm_v7_pmu.c @@ -649,24 +649,12 @@ static struct attribute_group armv7_pmuv2_events_attr_group = { /* * Perf Events' indices */ -#define ARMV7_IDX_CYCLE_COUNTER 0 -#define ARMV7_IDX_COUNTER0 1 -#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ - (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) - -#define ARMV7_MAX_COUNTERS 32 -#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) - +#define ARMV7_IDX_CYCLE_COUNTER 31 +#define ARMV7_IDX_COUNTER_MAX 31 /* * ARMv7 low level PMNC access */ -/* - * Perf Event to low level counters mapping - */ -#define ARMV7_IDX_TO_COUNTER(x) \ - (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) - /* * Per-CPU PMNC: config reg */ @@ -725,19 +713,17 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc) static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) { - return idx >= ARMV7_IDX_CYCLE_COUNTER && - idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); + return test_bit(idx, cpu_pmu->cntr_mask); } static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) { - return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); + return pmnc & BIT(idx); } static inline void armv7_pmnc_select_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx)); isb(); } @@ -787,29 +773,25 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val) static inline void armv7_pmnc_enable_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(idx))); } static inline void armv7_pmnc_disable_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); + asm volatile("mcr 
p15, 0, %0, c9, c12, 2" : : "r" (BIT(idx))); } static inline void armv7_pmnc_enable_intens(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(idx))); } static inline void armv7_pmnc_disable_intens(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(idx))); isb(); /* Clear the overflow flag in case an interrupt is pending. */ - asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(idx))); isb(); } @@ -853,15 +835,12 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); pr_info("CCNT =0x%08x\n", val); - for (cnt = ARMV7_IDX_COUNTER0; - cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { + for_each_set_bit(cnt, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); - pr_info("CNT[%d] count =0x%08x\n", - ARMV7_IDX_TO_COUNTER(cnt), val); + pr_info("CNT[%d] count =0x%08x\n", cnt, val); asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); - pr_info("CNT[%d] evtsel=0x%08x\n", - ARMV7_IDX_TO_COUNTER(cnt), val); + pr_info("CNT[%d] evtsel=0x%08x\n", cnt, val); } } #endif @@ -968,7 +947,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) */ regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1047,7 +1026,7 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, * For anything other than a cycle counter, try and use * the events counters */ - for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } @@ -1093,7 +1072,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, static void armv7pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 idx, nb_cnt = cpu_pmu->num_events, val; + u32 idx, val; if (cpu_pmu->secure_access) { asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val)); @@ -1102,7 +1081,7 @@ static void armv7pmu_reset(void *info) } /* The counter and interrupt enable registers are unknown at reset. 
*/ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { armv7_pmnc_disable_counter(idx); armv7_pmnc_disable_intens(idx); } @@ -1181,20 +1160,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu) static void armv7_read_num_pmnc_events(void *info) { - int *nb_cnt = info; + int nb_cnt; + struct arm_pmu *cpu_pmu = info; /* Read the nb of CNTx counters supported from PMNC */ - *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + bitmap_set(cpu_pmu->cntr_mask, 0, nb_cnt); /* Add the CPU cycles counter */ - *nb_cnt += 1; + set_bit(ARMV7_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask); } static int armv7_probe_num_events(struct arm_pmu *arm_pmu) { return smp_call_function_any(&arm_pmu->supported_cpus, armv7_read_num_pmnc_events, - &arm_pmu->num_events, 1); + arm_pmu, 1); } static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) @@ -1556,7 +1537,7 @@ static void krait_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; - u32 idx, nb_cnt = cpu_pmu->num_events; + u32 idx; armv7pmu_reset(info); @@ -1570,7 +1551,7 @@ static void krait_pmu_reset(void *info) venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } @@ -1594,7 +1575,7 @@ static int krait_event_to_bit(struct perf_event *event, unsigned int region, * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ - bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; + bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX); return bit; } @@ -1889,7 +1870,7 @@ static void scorpion_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; - u32 idx, nb_cnt = cpu_pmu->num_events; + u32 idx; armv7pmu_reset(info); @@ -1904,7 +1885,7 @@ static void scorpion_pmu_reset(void *info) venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } @@ -1927,7 +1908,7 @@ static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ - bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; + bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX); return bit; } diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c index 9ac30d51dd6f..bedf494e23c1 100644 --- a/drivers/perf/arm_xscale_pmu.c +++ b/drivers/perf/arm_xscale_pmu.c @@ -53,6 +53,8 @@ enum xscale_counters { XSCALE_COUNTER2, XSCALE_COUNTER3, }; +#define XSCALE1_NUM_COUNTERS 3 +#define XSCALE2_NUM_COUNTERS 5 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -168,7 +170,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE1_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -378,7 +380,8 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = xscale1pmu_start; cpu_pmu->stop = 
xscale1pmu_stop; cpu_pmu->map_event = xscale_map_event; - cpu_pmu->num_events = 3; + + bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE1_NUM_COUNTERS); return 0; } @@ -514,7 +517,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE2_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -747,7 +750,8 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = xscale2pmu_start; cpu_pmu->stop = xscale2pmu_stop; cpu_pmu->map_event = xscale_map_event; - cpu_pmu->num_events = 5; + + bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE2_NUM_COUNTERS); return 0; } diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index e382c9332b4b..dcfd4f818430 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -102,7 +102,7 @@ struct arm_pmu { void (*stop)(struct arm_pmu *); void (*reset)(void *); int (*map_event)(struct perf_event *event); - int num_events; + DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS); bool secure_access; /* 32-bit ARM only */ #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index f523916de014..651b1bc93d4d 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -6,6 +6,7 @@ #ifndef __PERF_ARM_PMUV3_H #define __PERF_ARM_PMUV3_H +#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31 #define ARMV8_PMU_MAX_COUNTERS 32 #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) -- Gitee From e3dd5d741d3120657eedf28de445e2156755c49a Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:19 -0600 Subject: [PATCH 44/75] perf: arm_pmuv3: Prepare for more than 32 counters ANBZ: #25252 commit a4a6e2078d85a9d94bcc7eab77845cb8cd39f680 upstream. Various PMUv3 registers which are a mask of counters are 64-bit registers, but the accessor functions take a u32. This has been fine as the upper 32-bits have been RES0 as there has been a maximum of 32 counters prior to Armv9.4/8.9. With Armv9.4/8.9, a 33rd counter is added. Update the accessor functions to use a u64 instead. Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-2-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Update kvm_pmu_ops to use u64 for CONFIG_KVM_ARM_HOST_VHE_ONLY. 
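As a rough illustration of why the widening matters (a sketch only, not
part of this patch): once a 33rd counter exists at index 32, a u32 mask
silently drops it before it ever reaches the enable register:

	u64 mask = BIT_ULL(32) | BIT(31);	/* new counter + cycle counter */
	u32 old = mask;				/* truncated: bit 32 is lost */
	write_pmcntenset(old);			/* the 33rd counter is never enabled */

With the u64 prototypes, the full mask reaches the register write intact.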
Signed-off-by: Wei Chen --- arch/arm64/include/asm/arm_pmuv3.h | 12 ++++----- arch/arm64/include/asm/kvm_host.h | 12 ++++----- arch/arm64/kvm/pmu.c | 8 +++--- drivers/perf/arm_pmuv3.c | 40 ++++++++++++++++-------------- include/kvm/arm_pmu.h | 4 +-- 5 files changed, 39 insertions(+), 37 deletions(-) diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index a4697a0b6835..19b3f9150058 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -71,22 +71,22 @@ static inline u64 read_pmccntr(void) return read_sysreg(pmccntr_el0); } -static inline void write_pmcntenset(u32 val) +static inline void write_pmcntenset(u64 val) { write_sysreg(val, pmcntenset_el0); } -static inline void write_pmcntenclr(u32 val) +static inline void write_pmcntenclr(u64 val) { write_sysreg(val, pmcntenclr_el0); } -static inline void write_pmintenset(u32 val) +static inline void write_pmintenset(u64 val) { write_sysreg(val, pmintenset_el1); } -static inline void write_pmintenclr(u32 val) +static inline void write_pmintenclr(u64 val) { write_sysreg(val, pmintenclr_el1); } @@ -96,12 +96,12 @@ static inline void write_pmccfiltr(u64 val) write_sysreg(val, pmccfiltr_el0); } -static inline void write_pmovsclr(u32 val) +static inline void write_pmovsclr(u64 val) { write_sysreg(val, pmovsclr_el0); } -static inline u32 read_pmovsclr(void) +static inline u64 read_pmovsclr(void) { return read_sysreg(pmovsclr_el0); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 57481e6cabe7..2f88e8aae613 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1134,12 +1134,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); #if IS_ENABLED(CONFIG_KVM) -void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); -void kvm_clr_pmu_events(u32 clr); +void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); +void kvm_clr_pmu_events(u64 clr); bool kvm_set_pmuserenr(u64 val); #else -static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} -static inline void kvm_clr_pmu_events(u32 clr) {} +static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {} +static inline void kvm_clr_pmu_events(u64 clr) {} static inline bool kvm_set_pmuserenr(u64 val) { return false; @@ -1193,8 +1193,8 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM_ARM_HOST_VHE_ONLY struct kvm_pmu_ops { - void (*set_pmu_events)(u32 set, struct perf_event_attr *attr); - void (*clr_pmu_events)(u32 clr); + void (*set_pmu_events)(u64 set, struct perf_event_attr *attr); + void (*clr_pmu_events)(u64 clr); bool (*set_pmuserenr)(u64 val); void (*vcpu_pmu_resync_el0)(void); }; diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 6e41a7e8a856..b17c4098523f 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -35,7 +35,7 @@ struct kvm_pmu_events *kvm_get_pmu_events(void) * Add events to track that we may want to switch at guest entry/exit * time. 
*/ -void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) +void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) { struct kvm_pmu_events *pmu = kvm_get_pmu_events(); @@ -51,7 +51,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) /* * Stop tracking events */ -void kvm_clr_pmu_events(u32 clr) +void kvm_clr_pmu_events(u64 clr) { struct kvm_pmu_events *pmu = kvm_get_pmu_events(); @@ -176,7 +176,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events) void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) { struct kvm_pmu_events *pmu; - u32 events_guest, events_host; + u64 events_guest, events_host; if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; @@ -197,7 +197,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) { struct kvm_pmu_events *pmu; - u32 events_guest, events_host; + u64 events_guest, events_host; if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 3d24cfec007c..7c548f0e39a6 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -505,14 +505,14 @@ static void armv8pmu_pmcr_write(u64 val) write_pmcr(val); } -static int armv8pmu_has_overflowed(u32 pmovsr) +static int armv8pmu_has_overflowed(u64 pmovsr) { - return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; + return !!(pmovsr & ARMV8_PMU_OVERFLOWED_MASK); } -static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) +static int armv8pmu_counter_has_overflowed(u64 pmnc, int idx) { - return pmnc & BIT(idx); + return !!(pmnc & BIT(idx)); } static u64 armv8pmu_read_evcntr(int idx) @@ -651,17 +651,17 @@ static void armv8pmu_write_event_type(struct perf_event *event) } } -static u32 armv8pmu_event_cnten_mask(struct perf_event *event) +static u64 armv8pmu_event_cnten_mask(struct perf_event *event) { int counter = event->hw.idx; - u32 mask = BIT(counter); + u64 mask = BIT(counter); if (armv8pmu_event_is_chained(event)) mask |= BIT(counter - 1); return mask; } -static void armv8pmu_enable_counter(u32 mask) +static void armv8pmu_enable_counter(u64 mask) { /* * Make sure event configuration register writes are visible before we @@ -674,7 +674,7 @@ static void armv8pmu_enable_counter(u32 mask) static void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - u32 mask = armv8pmu_event_cnten_mask(event); + u64 mask = armv8pmu_event_cnten_mask(event); host_kvm_set_pmu_events(mask, attr); @@ -683,7 +683,7 @@ static void armv8pmu_enable_event_counter(struct perf_event *event) armv8pmu_enable_counter(mask); } -static void armv8pmu_disable_counter(u32 mask) +static void armv8pmu_disable_counter(u64 mask) { write_pmcntenclr(mask); /* @@ -696,7 +696,7 @@ static void armv8pmu_disable_counter(u32 mask) static void armv8pmu_disable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - u32 mask = armv8pmu_event_cnten_mask(event); + u64 mask = armv8pmu_event_cnten_mask(event); host_kvm_clr_pmu_events(mask); @@ -705,7 +705,7 @@ static void armv8pmu_disable_event_counter(struct perf_event *event) armv8pmu_disable_counter(mask); } -static void armv8pmu_enable_intens(u32 mask) +static void armv8pmu_enable_intens(u64 mask) { write_pmintenset(mask); } @@ -715,7 +715,7 @@ static void armv8pmu_enable_event_irq(struct perf_event *event) armv8pmu_enable_intens(BIT(event->hw.idx)); } -static void armv8pmu_disable_intens(u32 mask) +static void armv8pmu_disable_intens(u64 mask) { 
 	write_pmintenclr(mask);
 	isb();
@@ -729,9 +729,9 @@ static void armv8pmu_disable_event_irq(struct perf_event *event)
 {
 	armv8pmu_disable_intens(BIT(event->hw.idx));
 }
 
-static u32 armv8pmu_getreset_flags(void)
+static u64 armv8pmu_getreset_flags(void)
 {
-	u32 value;
+	u64 value;
 
 	/* Read */
 	value = read_pmovsclr();
@@ -827,7 +827,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 
 static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
 {
-	u32 pmovsr;
+	u64 pmovsr;
 	struct perf_sample_data data;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 	struct pt_regs *regs;
@@ -1040,14 +1040,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 static void armv8pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u64 pmcr;
+	u64 pmcr, mask;
+
+	bitmap_to_arr64(&mask, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS);
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	armv8pmu_disable_counter(U32_MAX);
-	armv8pmu_disable_intens(U32_MAX);
+	armv8pmu_disable_counter(mask);
+	armv8pmu_disable_intens(mask);
 
 	/* Clear the counters we flip at guest entry/exit */
-	host_kvm_clr_pmu_events(U32_MAX);
+	host_kvm_clr_pmu_events(mask);
 
 	/*
 	 * Initialize & Reset PMNC. Request overflow interrupt for
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 9d8a2f11f39b..fe2e97a650e0 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -19,8 +19,8 @@ struct kvm_pmc {
 };
 
 struct kvm_pmu_events {
-	u32 events_host;
-	u32 events_guest;
+	u64 events_host;
+	u64 events_guest;
 };
 
 struct kvm_pmu {
--
Gitee


From 9c0fb31e97507305274d5fcea2071d5774d406fb Mon Sep 17 00:00:00 2001
From: "Rob Herring (Arm)"
Date: Wed, 31 Jul 2024 10:51:20 -0600
Subject: [PATCH 45/75] KVM: arm64: pmu: Use arm_pmuv3.h register accessors

ANBZ: #25252

commit 741ee5284551cf5daae95d9c8c1e34f47382ed3c upstream.

Commit df29ddf4f04b ("arm64: perf: Abstract system register accesses
away") split off PMU register accessor functions to a standalone header.
Let's use it for KVM PMU code and get rid of one copy of the ugly switch
macro.
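For background, system register names must be compile-time constants, so
read_pmevtypern() is generated from a switch over n via PMEVN_SWITCH(); a
condensed sketch of what the generated accessor boils down to
(illustrative, not the literal macro expansion):

	static inline unsigned long read_pmevtypern(int n)
	{
		switch (n) {
		case 0: return read_sysreg(pmevtyper0_el0);
		/* ... one case per event counter ... */
		case 30: return read_sysreg(pmevtyper30_el0);
		}
		return 0;
	}

Reusing this accessor lets KVM drop its private PMEVTYPER_CASES() copy of
the same switch.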
Acked-by: Mark Rutland Reviewed-by: Marc Zyngier Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-3-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/include/asm/arm_pmuv3.h | 13 ++++++ arch/arm64/kvm/pmu.c | 66 ++++-------------------------- 2 files changed, 21 insertions(+), 58 deletions(-) diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index 19b3f9150058..36c3e82b4eec 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -33,6 +33,14 @@ static inline void write_pmevtypern(int n, unsigned long val) PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } +#define RETURN_READ_PMEVTYPERN(n) \ + return read_sysreg(pmevtyper##n##_el0) +static inline unsigned long read_pmevtypern(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVTYPERN); + return 0; +} + static inline unsigned long read_pmmir(void) { return read_cpuid(PMMIR_EL1); @@ -96,6 +104,11 @@ static inline void write_pmccfiltr(u64 val) write_sysreg(val, pmccfiltr_el0); } +static inline u64 read_pmccfiltr(void) +{ + return read_sysreg(pmccfiltr_el0); +} + static inline void write_pmovsclr(u64 val) { write_sysreg(val, pmovsclr_el0); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index b17c4098523f..2cebb6619704 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -5,6 +5,7 @@ */ #include #include +#include static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events); @@ -62,63 +63,16 @@ void kvm_clr_pmu_events(u64 clr) pmu->events_guest &= ~clr; } -#define PMEVTYPER_READ_CASE(idx) \ - case idx: \ - return read_sysreg(pmevtyper##idx##_el0) - -#define PMEVTYPER_WRITE_CASE(idx) \ - case idx: \ - write_sysreg(val, pmevtyper##idx##_el0); \ - break - -#define PMEVTYPER_CASES(readwrite) \ - PMEVTYPER_##readwrite##_CASE(0); \ - PMEVTYPER_##readwrite##_CASE(1); \ - PMEVTYPER_##readwrite##_CASE(2); \ - PMEVTYPER_##readwrite##_CASE(3); \ - PMEVTYPER_##readwrite##_CASE(4); \ - PMEVTYPER_##readwrite##_CASE(5); \ - PMEVTYPER_##readwrite##_CASE(6); \ - PMEVTYPER_##readwrite##_CASE(7); \ - PMEVTYPER_##readwrite##_CASE(8); \ - PMEVTYPER_##readwrite##_CASE(9); \ - PMEVTYPER_##readwrite##_CASE(10); \ - PMEVTYPER_##readwrite##_CASE(11); \ - PMEVTYPER_##readwrite##_CASE(12); \ - PMEVTYPER_##readwrite##_CASE(13); \ - PMEVTYPER_##readwrite##_CASE(14); \ - PMEVTYPER_##readwrite##_CASE(15); \ - PMEVTYPER_##readwrite##_CASE(16); \ - PMEVTYPER_##readwrite##_CASE(17); \ - PMEVTYPER_##readwrite##_CASE(18); \ - PMEVTYPER_##readwrite##_CASE(19); \ - PMEVTYPER_##readwrite##_CASE(20); \ - PMEVTYPER_##readwrite##_CASE(21); \ - PMEVTYPER_##readwrite##_CASE(22); \ - PMEVTYPER_##readwrite##_CASE(23); \ - PMEVTYPER_##readwrite##_CASE(24); \ - PMEVTYPER_##readwrite##_CASE(25); \ - PMEVTYPER_##readwrite##_CASE(26); \ - PMEVTYPER_##readwrite##_CASE(27); \ - PMEVTYPER_##readwrite##_CASE(28); \ - PMEVTYPER_##readwrite##_CASE(29); \ - PMEVTYPER_##readwrite##_CASE(30) - /* * Read a value direct from PMEVTYPER where idx is 0-30 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). 
*/ static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) { - switch (idx) { - PMEVTYPER_CASES(READ); - case ARMV8_PMU_CYCLE_IDX: - return read_sysreg(pmccfiltr_el0); - default: - WARN_ON(1); - } + if (idx == ARMV8_PMU_CYCLE_IDX) + return read_pmccfiltr(); - return 0; + return read_pmevtypern(idx); } /* @@ -127,14 +81,10 @@ static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) */ static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) { - switch (idx) { - PMEVTYPER_CASES(WRITE); - case ARMV8_PMU_CYCLE_IDX: - write_sysreg(val, pmccfiltr_el0); - break; - default: - WARN_ON(1); - } + if (idx == ARMV8_PMU_CYCLE_IDX) + write_pmccfiltr(val); + else + write_pmevtypern(idx, val); } /* -- Gitee From e25f00d0e26c71829cfab50cf0b9b1a555e4d3bb Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:21 -0600 Subject: [PATCH 46/75] KVM: arm64: pmu: Use generated define for PMSELR_EL0.SEL access ANBZ: #25252 commit f9b11aa00708d94a0cd78bfde34b68c0f95d8b50 upstream. ARMV8_PMU_COUNTER_MASK is really a mask for the PMSELR_EL0.SEL register field. Make that clear by adding a standard sysreg definition for the register, and using it instead. Reviewed-by: Mark Rutland Acked-by: Mark Rutland Reviewed-by: Marc Zyngier Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-4-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/kvm/sys_regs.c | 10 +++++----- arch/arm64/tools/sysreg | 5 +++++ include/linux/perf/arm_pmuv3.h | 1 - 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c003dbe3fa27..47be326abec1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -401,7 +401,6 @@ #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) #define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) #define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) -#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) #define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) #define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) #define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index a16157649813..105ee83913f9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -785,7 +785,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK; + __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK; return __vcpu_sys_reg(vcpu, r->reg); } @@ -877,7 +877,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, else /* return PMSELR.SEL field */ p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) - & ARMV8_PMU_COUNTER_MASK; + & PMSELR_EL0_SEL_MASK; return true; } @@ -945,8 +945,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (pmu_access_event_counter_el0_disabled(vcpu)) return false; - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) - & ARMV8_PMU_COUNTER_MASK; + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, + __vcpu_sys_reg(vcpu, PMSELR_EL0)); } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ if (pmu_access_cycle_counter_el0_disabled(vcpu)) @@ -996,7 +996,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & 
ARMV8_PMU_COUNTER_MASK; + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0)); reg = PMEVTYPER0_EL0 + idx; } else if (r->CRn == 14 && (r->CRm & 12) == 12) { idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index ae2a7486c5e5..357626cd614c 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1998,6 +1998,11 @@ Field 4 P Field 3:0 ALIGN EndSysreg +Sysreg PMSELR_EL0 3 3 9 12 5 +Res0 63:5 +Field 4:0 SEL +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 651b1bc93d4d..e359252940e6 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -8,7 +8,6 @@ #define ARMV8_PMU_MAX_GENERAL_COUNTERS 31 #define ARMV8_PMU_MAX_COUNTERS 32 -#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) /* * Common architectural and microarchitectural event numbers. -- Gitee From ddd37d8961603a2bf54823461f3c466c49ae1748 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:22 -0600 Subject: [PATCH 47/75] arm64: perf/kvm: Use a common PMU cycle counter define ANBZ: #25252 commit 126d7d7cce5e048fb82477a9842d088d10ff0df6 upstream. The PMUv3 and KVM code each have a define for the PMU cycle counter index. Move KVM's define to a shared location and use it for PMUv3 driver. Reviewed-by: Marc Zyngier Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-5-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/kvm/sys_regs.c | 1 + drivers/perf/arm_pmuv3.c | 19 +++++++------------ include/kvm/arm_pmu.h | 1 - include/linux/perf/arm_pmuv3.h | 3 +++ 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 105ee83913f9..75d08b9ade3a 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 7c548f0e39a6..588c77e3e570 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -451,11 +451,6 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { .attrs = armv8_pmuv3_caps_attrs, }; -/* - * Perf Events' indices - */ -#define ARMV8_IDX_CYCLE_COUNTER 31 - /* * We unconditionally enable ARMv8.5-PMU long event counter support * (64-bit events) where supported. 
Indicate if this arm_pmu has long @@ -574,7 +569,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event) int idx = hwc->idx; u64 value; - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) value = read_pmccntr(); else value = armv8pmu_read_hw_counter(event); @@ -607,7 +602,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) value = armv8pmu_bias_long_counter(event, value); - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccntr(value); else armv8pmu_write_hw_counter(event, value); @@ -644,7 +639,7 @@ static void armv8pmu_write_event_type(struct perf_event *event) armv8pmu_write_evtype(idx - 1, hwc->config_base); armv8pmu_write_evtype(idx, chain_evt); } else { - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccfiltr(hwc->config_base); else armv8pmu_write_evtype(idx, hwc->config_base); @@ -772,7 +767,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) /* Clear any unused counters to avoid leaking their contents */ for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) { - if (i == ARMV8_IDX_CYCLE_COUNTER) + if (i == ARMV8_PMU_CYCLE_IDX) write_pmccntr(0); else armv8pmu_write_evcntr(i, 0); @@ -933,8 +928,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* Always prefer to place a cycle counter into the cycle counter. */ if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && !armv8pmu_event_get_threshold(&event->attr)) { - if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) - return ARMV8_IDX_CYCLE_COUNTER; + if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask)) + return ARMV8_PMU_CYCLE_IDX; else if (armv8pmu_event_is_64bit(event) && armv8pmu_event_want_user_access(event) && !armv8pmu_has_long_event(cpu_pmu)) @@ -1196,7 +1191,7 @@ static void __armv8pmu_probe_pmu(void *info) 0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read())); /* Add the CPU cycles counter */ - set_bit(ARMV8_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask); + set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask); pmceid[0] = pmceid_raw[0] = read_pmceid0(); pmceid[1] = pmceid_raw[1] = read_pmceid1(); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index fe2e97a650e0..c0f6d574aa80 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -10,7 +10,6 @@ #include #include -#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) struct kvm_pmc { diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index e359252940e6..8d9b2365dcf0 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -9,6 +9,9 @@ #define ARMV8_PMU_MAX_GENERAL_COUNTERS 31 #define ARMV8_PMU_MAX_COUNTERS 32 +#define ARMV8_PMU_CYCLE_IDX 31 + + /* * Common architectural and microarchitectural event numbers. */ -- Gitee From 7e530bdf2f44aefe385ece5500ba38426819e015 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:23 -0600 Subject: [PATCH 48/75] KVM: arm64: Refine PMU defines for number of counters ANBZ: #25252 commit 2f62701fa5b0ee94c68d2fcfc470d08aef195441 upstream. There are 2 defines for the number of PMU counters: ARMV8_PMU_MAX_COUNTERS and ARMPMU_MAX_HWEVENTS. Both are the same currently, but Armv9.4/8.9 increases the number of possible counters from 32 to 33. With this change, the maximum number of counters will differ for KVM's PMU emulation which is PMUv3.4. 
Give KVM PMU emulation its own define to decouple it from the rest of
the kernel's number of PMU counters. The VHE PMU code needs to match
the PMU driver, so switch it to use ARMPMU_MAX_HWEVENTS instead.

Acked-by: Mark Rutland
Reviewed-by: Marc Zyngier
Signed-off-by: Rob Herring (Arm)
Tested-by: James Clark
Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-6-280a8d7ff465@kernel.org
Signed-off-by: Will Deacon
Signed-off-by: Wei Chen
---
 arch/arm64/kvm/pmu-emul.c      | 8 ++++----
 arch/arm64/kvm/pmu.c           | 5 +++--
 include/kvm/arm_pmu.h          | 3 ++-
 include/linux/perf/arm_pmuv3.h | 2 --
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 7cd5df4abe8f..91ebac613465 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -222,7 +222,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
 		pmu->pmc[i].idx = i;
 }
 
@@ -249,7 +249,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	int i;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
 		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
@@ -280,7 +280,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
 		struct kvm_pmc *pmc;
 
 		if (!(val & BIT(i)))
@@ -312,7 +312,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
 		struct kvm_pmc *pmc;
 
 		if (!(val & BIT(i)))
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 2cebb6619704..fb3995a6fcb1 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -5,6 +5,7 @@
  */
 #include
 #include
+#include
 #include
 
 static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
@@ -95,7 +96,7 @@ static void kvm_vcpu_pmu_enable_el0(unsigned long events)
 	u64 typer;
 	u32 counter;
 
-	for_each_set_bit(counter, &events, 32) {
+	for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
 		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
 		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
 		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
@@ -110,7 +111,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 	u64 typer;
 	u32 counter;
 
-	for_each_set_bit(counter, &events, 32) {
+	for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
 		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
 		typer |= ARMV8_PMU_EXCLUDE_EL0;
 		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index c0f6d574aa80..f90fa3dcd6ea 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -10,6 +10,7 @@
 #include
 #include
 
+#define KVM_ARMV8_PMU_MAX_COUNTERS	32
 #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
 struct kvm_pmc {
@@ -25,7 +26,7 @@ struct kvm_pmu_events {
 struct kvm_pmu {
 	struct irq_work overflow_work;
 	struct kvm_pmu_events events;
-	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
 	int irq_num;
 	bool created;
 	bool irq_level;
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 8d9b2365dcf0..e8783843e7d2 100644
---
a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -7,8 +7,6 @@ #define __PERF_ARM_PMUV3_H #define ARMV8_PMU_MAX_GENERAL_COUNTERS 31 -#define ARMV8_PMU_MAX_COUNTERS 32 - #define ARMV8_PMU_CYCLE_IDX 31 -- Gitee From cfc691c66379cf52c9552a6cd8fa22672acf0294 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 31 Jul 2024 10:51:24 -0600 Subject: [PATCH 49/75] perf: arm_pmuv3: Add support for Armv9.4 PMU instruction counter ANBZ: #25252 commit d8226d8cfbaf5eb9771af8ad8b4e58697e2ffb74 upstream. Armv9.4/8.9 PMU adds optional support for a fixed instruction counter similar to the fixed cycle counter. Support for the feature is indicated in the ID_AA64DFR1_EL1 register PMICNTR field. The counter is not accessible in AArch32. Existing userspace using direct counter access won't know how to handle the fixed instruction counter, so we have to avoid using the counter when user access is requested. Acked-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-7-280a8d7ff465@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm/include/asm/arm_pmuv3.h | 20 ++++++++++++++++++++ arch/arm64/include/asm/arm_pmuv3.h | 28 ++++++++++++++++++++++++++++ arch/arm64/kvm/pmu.c | 8 ++++++-- arch/arm64/tools/sysreg | 25 +++++++++++++++++++++++++ drivers/perf/arm_pmuv3.c | 25 +++++++++++++++++++++++++ include/linux/perf/arm_pmu.h | 8 ++++++-- include/linux/perf/arm_pmuv3.h | 6 ++++-- 7 files changed, 114 insertions(+), 6 deletions(-) diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index a41b503b7dcd..f63ba8986b24 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -127,6 +127,12 @@ static inline u32 read_pmuver(void) return (dfr0 >> 24) & 0xf; } +static inline bool pmuv3_has_icntr(void) +{ + /* FEAT_PMUv3_ICNTR not accessible for 32-bit */ + return false; +} + static inline void write_pmcr(u32 val) { write_sysreg(val, PMCR); @@ -152,6 +158,13 @@ static inline u64 read_pmccntr(void) return read_sysreg(PMCCNTR); } +static inline void write_pmicntr(u64 val) {} + +static inline u64 read_pmicntr(void) +{ + return 0; +} + static inline void write_pmcntenset(u32 val) { write_sysreg(val, PMCNTENSET); @@ -177,6 +190,13 @@ static inline void write_pmccfiltr(u32 val) write_sysreg(val, PMCCFILTR); } +static inline void write_pmicfiltr(u64 val) {} + +static inline u64 read_pmicfiltr(void) +{ + return 0; +} + static inline void write_pmovsclr(u32 val) { write_sysreg(val, PMOVSR); diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index 36c3e82b4eec..468a049bc63b 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -54,6 +54,14 @@ static inline u32 read_pmuver(void) ID_AA64DFR0_EL1_PMUVer_SHIFT); } +static inline bool pmuv3_has_icntr(void) +{ + u64 dfr1 = read_sysreg(id_aa64dfr1_el1); + + return !!cpuid_feature_extract_unsigned_field(dfr1, + ID_AA64DFR1_EL1_PMICNTR_SHIFT); +} + static inline void write_pmcr(u64 val) { write_sysreg(val, pmcr_el0); @@ -79,6 +87,16 @@ static inline u64 read_pmccntr(void) return read_sysreg(pmccntr_el0); } +static inline void write_pmicntr(u64 val) +{ + write_sysreg_s(val, SYS_PMICNTR_EL0); +} + +static inline u64 read_pmicntr(void) +{ + return read_sysreg_s(SYS_PMICNTR_EL0); +} + static inline void write_pmcntenset(u64 val) { write_sysreg(val, pmcntenset_el0); @@ -109,6 +127,16 @@ static inline u64 read_pmccfiltr(void) return 
read_sysreg(pmccfiltr_el0); } +static inline void write_pmicfiltr(u64 val) +{ + write_sysreg_s(val, SYS_PMICFILTR_EL0); +} + +static inline u64 read_pmicfiltr(void) +{ + return read_sysreg_s(SYS_PMICFILTR_EL0); +} + static inline void write_pmovsclr(u64 val) { write_sysreg(val, pmovsclr_el0); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index fb3995a6fcb1..94e7b098002e 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -66,24 +66,28 @@ void kvm_clr_pmu_events(u64 clr) /* * Read a value direct from PMEVTYPER where idx is 0-30 - * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + * or PMxCFILTR_EL0 where idx is 31-32. */ static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) { if (idx == ARMV8_PMU_CYCLE_IDX) return read_pmccfiltr(); + else if (idx == ARMV8_PMU_INSTR_IDX) + return read_pmicfiltr(); return read_pmevtypern(idx); } /* * Write a value direct to PMEVTYPER where idx is 0-30 - * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + * or PMxCFILTR_EL0 where idx is 31-32. */ static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) { if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccfiltr(val); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicfiltr(val); else write_pmevtypern(idx, val); } diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 357626cd614c..caf16917445a 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1874,6 +1874,31 @@ Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMICNTR_EL0 3 3 9 4 0 +Field 63:0 ICNT +EndSysreg + +Sysreg PMICFILTR_EL0 3 3 9 6 0 +Res0 63:59 +Field 58 SYNC +Field 57:56 VS +Res0 55:32 +Field 31 P +Field 30 U +Field 29 NSK +Field 28 NSU +Field 27 NSH +Field 26 M +Res0 25 +Field 24 SH +Field 23 T +Field 22 RLK +Field 21 RLU +Field 20 RLH +Res0 19:16 +Field 15:0 evtCount +EndSysreg + Sysreg PMSCR_EL1 3 0 9 9 0 Res0 63:8 Field 7:6 PCT diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 588c77e3e570..16761158ab3f 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -571,6 +571,8 @@ static u64 armv8pmu_read_counter(struct perf_event *event) if (idx == ARMV8_PMU_CYCLE_IDX) value = read_pmccntr(); + else if (idx == ARMV8_PMU_INSTR_IDX) + value = read_pmicntr(); else value = armv8pmu_read_hw_counter(event); @@ -604,6 +606,8 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccntr(value); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicntr(value); else armv8pmu_write_hw_counter(event, value); } @@ -641,6 +645,8 @@ static void armv8pmu_write_event_type(struct perf_event *event) } else { if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccfiltr(hwc->config_base); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicfiltr(hwc->config_base); else armv8pmu_write_evtype(idx, hwc->config_base); } @@ -769,6 +775,8 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) ARMPMU_MAX_HWEVENTS) { if (i == ARMV8_PMU_CYCLE_IDX) write_pmccntr(0); + else if (i == ARMV8_PMU_INSTR_IDX) + write_pmicntr(0); else armv8pmu_write_evcntr(i, 0); } @@ -936,6 +944,19 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, return -EAGAIN; } + /* + * Always prefer to place a instruction counter into the instruction counter, + * but don't expose the instruction counter to userspace access as userspace + * may not know how to handle it. 
+	 */
+	if ((evtype == ARMV8_PMUV3_PERFCTR_INST_RETIRED) &&
+	    !armv8pmu_event_get_threshold(&event->attr) &&
+	    test_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask) &&
+	    !armv8pmu_event_want_user_access(event)) {
+		if (!test_and_set_bit(ARMV8_PMU_INSTR_IDX, cpuc->used_mask))
+			return ARMV8_PMU_INSTR_IDX;
+	}
+
 	/*
 	 * Otherwise use events counters
 	 */
@@ -1193,6 +1214,10 @@ static void __armv8pmu_probe_pmu(void *info)
 		   0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()));
 
 	/* Add the CPU cycles counter */
 	set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask);
 
+	/* Add the CPU instructions counter */
+	if (pmuv3_has_icntr())
+		set_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask);
+
 	pmceid[0] = pmceid_raw[0] = read_pmceid0();
 	pmceid[1] = pmceid_raw[1] = read_pmceid1();
 
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index dcfd4f818430..237654404a17 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,10 +17,14 @@
 #ifdef CONFIG_ARM_PMU
 
 /*
- * The ARMv7 CPU PMU supports up to 32 event counters.
+ * The Armv7 and Armv8.8 or less CPU PMU supports up to 32 event counters.
+ * The Armv8.9/9.4 CPU PMU supports up to 33 event counters.
  */
+#ifdef CONFIG_ARM
 #define ARMPMU_MAX_HWEVENTS		32
-
+#else
+#define ARMPMU_MAX_HWEVENTS		33
+#endif
 /*
  * ARM PMU hw_event flags
  */
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index e8783843e7d2..a17ae94c996a 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -8,7 +8,7 @@
 #define ARMV8_PMU_MAX_GENERAL_COUNTERS	31
 
 #define ARMV8_PMU_CYCLE_IDX		31
-
+#define ARMV8_PMU_INSTR_IDX		32	/* Not accessible from AArch32 */
 
 /*
  * Common architectural and microarchitectural event numbers.
@@ -228,8 +228,10 @@
  */
 #define ARMV8_PMU_OVSR_P		GENMASK(30, 0)
 #define ARMV8_PMU_OVSR_C		BIT(31)
+#define ARMV8_PMU_OVSR_F		BIT_ULL(32) /* arm64 only */
 /* Mask for writable bits is both P and C fields */
-#define ARMV8_PMU_OVERFLOWED_MASK	(ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
+#define ARMV8_PMU_OVERFLOWED_MASK	(ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \
+					 ARMV8_PMU_OVSR_F)
 
 /*
  * PMXEVTYPER: Event selection reg
--
Gitee


From 5508f8a28a0aad7c447e4bcce181f7d33231516e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 1 Mar 2024 11:16:56 +0000
Subject: [PATCH 50/75] KVM: arm64: Add accessor for per-CPU state

ANBZ: #25252

commit 87f842c6c6543cf0dd66161fdf4b62cec804479b upstream.

In order to facilitate the introduction of new per-CPU state, add a
new host_data_ptr() helper that hides some of the per-CPU verbosity,
and make it easier to move that state around in the future.

Reviewed-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
Signed-off-by: Wei Chen

OpenAnolis introduced this_cpu_ptr_wrapper in commit
a5331568122064718ba1c4e4dade8b20d1fef4bb to ensure KVM functions
correctly when built as a module. This patch introduces
this_cpu_ptr_hyp_sym_wrapper for module mode and updates host_data_ptr
to use this_cpu_ptr_wrapper and this_cpu_ptr_hyp_sym_wrapper, aligning
its behavior with module mode for consistency.
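The net effect on callers is mechanical; a representative before/after
from the hunks below:

	/* before */
	host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt;

	/* after */
	host_ctxt = host_data_ptr(host_ctxt);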
Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_asm.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 37 +++++++++++++++++++++++ arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 4 +-- arch/arm64/kvm/hyp/include/hyp/switch.h | 8 ++--- arch/arm64/kvm/hyp/nvhe/psci-relay.c | 2 +- arch/arm64/kvm/hyp/nvhe/setup.c | 3 +- arch/arm64/kvm/hyp/nvhe/switch.c | 4 +-- arch/arm64/kvm/hyp/vhe/switch.c | 4 +-- arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 4 +-- arch/arm64/kvm/pmu.c | 2 +- 11 files changed, 55 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index cd1cebfbd083..9e1d8ec62e27 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -119,8 +119,10 @@ enum __kvm_host_smccc_func { #ifdef CONFIG_KVM_ARM_HOST_VHE_ONLY #define this_cpu_ptr_wrapper(sym) this_cpu_ptr(sym) +#define this_cpu_ptr_hyp_sym_wrapper(sym) this_cpu_ptr(sym) #else #define this_cpu_ptr_wrapper(sym) this_cpu_ptr(&sym) +#define this_cpu_ptr_hyp_sym_wrapper(sym) this_cpu_ptr(&sym) #endif #if defined(__KVM_NVHE_HYPERVISOR__) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2f88e8aae613..88a34fd964cf 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -434,6 +434,17 @@ struct kvm_cpu_context { struct kvm_vcpu *__hyp_running_vcpu; }; +/* + * This structure is instantiated on a per-CPU basis, and contains + * data that is: + * + * - tied to a single physical CPU, and + * - either have a lifetime that does not extend past vcpu_put() + * - or is an invariant for the lifetime of the system + * + * Use host_data_ptr(field) as a way to access a pointer to such a + * field. + */ struct kvm_host_data { struct kvm_cpu_context host_ctxt; }; @@ -1081,6 +1092,32 @@ extern struct kvm_host_data __percpu *kvm_host_data; DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); #endif +/* + * How we access per-CPU host data depends on the where we access it from, + * and the mode we're in: + * + * - VHE and nVHE hypervisor bits use their locally defined instance + * + * - the rest of the kernel use either the VHE or nVHE one, depending on + * the mode we're running in. + * + * Unless we're in protected mode, fully deprivileged, and the nVHE + * per-CPU stuff is exclusively accessible to the protected EL2 code. + * In this case, the EL1 code uses the *VHE* data as its private state + * (which makes sense in a way as there shouldn't be any shared state + * between the host and the hypervisor). + * + * Yes, this is all totally trivial. Shoot me now. + */ +#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__) +#define host_data_ptr(f) (&this_cpu_ptr_wrapper(kvm_host_data)->f) +#else +#define host_data_ptr(f) \ + (static_branch_unlikely(&kvm_protected_mode_initialized) ? 
\ + &this_cpu_ptr_wrapper(kvm_host_data)->f : \ + &this_cpu_ptr_hyp_sym_wrapper(kvm_host_data)->f) +#endif + static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) { /* The host's MPIDR is immutable, so let's set it up at boot time */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0a2f06c2a634..8bc23366e406 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1948,7 +1948,7 @@ static void cpu_set_hyp_vector(void) static void cpu_hyp_init_context(void) { - kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); + kvm_init_host_cpu_context(host_data_ptr(host_ctxt)); if (!is_kernel_in_hyp_mode()) cpu_init_hyp_mode(); diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h index e52a92a28606..eec0f8ccda56 100644 --- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h @@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu) if (!vcpu_get_flag(vcpu, DEBUG_DIRTY)) return; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); @@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu) if (!vcpu_get_flag(vcpu, DEBUG_DIRTY)) return; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 68db1d026529..aca6eeee145d 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -82,7 +82,7 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *hctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; u64 r_val, w_val; @@ -157,7 +157,7 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *hctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; @@ -218,7 +218,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) write_sysreg(0, pmselr_el0); - hctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + hctxt = host_data_ptr(host_ctxt); ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0); write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0); vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); @@ -253,7 +253,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) if (kvm_arm_support_pmu_v3()) { struct kvm_cpu_context *hctxt; - hctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + hctxt = host_data_ptr(host_ctxt); write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0); vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU); } diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index d57bcb6ab94d..dfe8fe0f7eaf 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -205,7 
+205,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) struct psci_boot_args *boot_args; struct kvm_cpu_context *host_ctxt; - host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); if (is_cpu_on) boot_args = this_cpu_ptr(&cpu_on_args); diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 0d5e0a89ddce..c6edba9c66b6 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -257,8 +257,7 @@ static int fix_hyp_pgtable_refcnt(void) void __noreturn __pkvm_init_finalise(void) { - struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data); - struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt; + struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt); unsigned long nr_pages, reserved_pages, pfn; int ret; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index f71dd323f8dd..1533b9e6fcfd 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -312,7 +312,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) pmr_sync(); } - host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; @@ -417,7 +417,7 @@ asmlinkage void __noreturn hyp_panic(void) struct kvm_cpu_context *host_ctxt; struct kvm_vcpu *vcpu; - host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); vcpu = host_ctxt->__hyp_running_vcpu; if (vcpu) { diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 535c01cdade4..ba6069c886a6 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -211,7 +211,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; @@ -306,7 +306,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par) struct kvm_cpu_context *host_ctxt; struct kvm_vcpu *vcpu; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); vcpu = host_ctxt->__hyp_running_vcpu; __deactivate_traps(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index 2397c0f81753..5cb4b70e0aef 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -67,7 +67,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; struct kvm_cpu_context *host_ctxt; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); __sysreg_save_user_state(host_ctxt); /* @@ -113,7 +113,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; struct kvm_cpu_context *host_ctxt; - host_ctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + host_ctxt = host_data_ptr(host_ctxt); deactivate_traps_vhe_put(vcpu); __sysreg_save_el1_state(guest_ctxt); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 94e7b098002e..0b3adf3e17b4 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -187,7 +187,7 @@ bool kvm_set_pmuserenr(u64 val) if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU)) return false; - hctxt = &this_cpu_ptr_wrapper(kvm_host_data)->host_ctxt; + hctxt = 
host_data_ptr(host_ctxt); ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val; return true; } -- Gitee From 0fc8d4c1f5a7aa732099546cace6fc452400067e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Feb 2024 15:58:46 +0000 Subject: [PATCH 51/75] KVM: arm64: Exclude host_debug_data from vcpu_arch ANBZ: #25252 commit 6db55734ec4008da39e10d2fffa913fd9751ccaa upstream. Keeping host_debug_state on a per-vcpu basis is completely pointless. The lifetime of this data is only that of the inner run-loop, which means it is never accessed outside of the core EL2 code. Move the structure into kvm_host_data, and save over 500 bytes per vcpu. Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 31 +++++++++++++---------- arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 4 +-- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 8 +++--- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 88a34fd964cf..f660f2695a62 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -447,6 +447,19 @@ struct kvm_cpu_context { */ struct kvm_host_data { struct kvm_cpu_context host_ctxt; + + /* + * host_debug_state contains the host registers which are + * saved and restored during world switches. + */ + struct { + /* {Break,watch}point registers */ + struct kvm_guest_debug_arch regs; + /* Statistical profiling extension */ + u64 pmscr_el1; + /* Self-hosted trace */ + u64 trfcr_el1; + } host_debug_state; }; struct kvm_host_psci_config { @@ -539,11 +552,10 @@ struct kvm_vcpu_arch { * We maintain more than a single set of debug registers to support * debugging the guest from the host and to maintain separate host and * guest state during world switches. vcpu_debug_state are the debug - * registers of the vcpu as the guest sees them. host_debug_state are - * the host registers which are saved and restored during - * world switches. external_debug_state contains the debug - * values we want to debug the guest. This is set via the - * KVM_SET_GUEST_DEBUG ioctl. + * registers of the vcpu as the guest sees them. + * + * external_debug_state contains the debug values we want to debug the + * guest. This is set via the KVM_SET_GUEST_DEBUG ioctl. * * debug_ptr points to the set of debug registers that should be loaded * onto the hardware when running the guest. 
@@ -554,15 +566,6 @@ struct kvm_vcpu_arch { struct task_struct *parent_task; - struct { - /* {Break,watch}point registers */ - struct kvm_guest_debug_arch regs; - /* Statistical profiling extension */ - u64 pmscr_el1; - /* Self-hosted trace */ - u64 trfcr_el1; - } host_debug_state; - /* VGIC state */ struct vgic_cpu vgic_cpu; struct arch_timer_cpu timer_cpu; diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h index eec0f8ccda56..d00093699aaf 100644 --- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h @@ -137,7 +137,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu) host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; - host_dbg = &vcpu->arch.host_debug_state.regs; + host_dbg = host_data_ptr(host_debug_state.regs); guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); __debug_save_state(host_dbg, host_ctxt); @@ -156,7 +156,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu) host_ctxt = host_data_ptr(host_ctxt); guest_ctxt = &vcpu->arch.ctxt; - host_dbg = &vcpu->arch.host_debug_state.regs; + host_dbg = host_data_ptr(host_debug_state.regs); guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); __debug_save_state(guest_dbg, guest_ctxt); diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index 4558c02eb352..d89eaeae1c2a 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -83,10 +83,10 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) - __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1); + __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); /* Disable and flush Self-Hosted Trace generation */ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) - __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1); + __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1)); } void __debug_switch_to_guest(struct kvm_vcpu *vcpu) @@ -97,9 +97,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu) void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) - __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1); + __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) - __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1); + __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1)); } void __debug_switch_to_host(struct kvm_vcpu *vcpu) -- Gitee From 6ec91e7d212da9184c0026efdd77480829c882d1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 27 Feb 2024 08:27:54 +0000 Subject: [PATCH 52/75] KVM: arm64: Exclude mdcr_el2_host from kvm_vcpu_arch ANBZ: #25252 commit 4bacd723705a6b6c8386daf3d5148aca66135f3c upstream. As for the rest of the host debug state, the host copy of mdcr_el2 has little to do in the vcpu, and is better placed in the host_data structure. 
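The save/restore of the host's mdcr_el2 in __activate_traps_common() and
__deactivate_traps_common() then becomes (taken from the hunks below):

	/* before */
	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);

	/* after */
	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);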
Reviewed-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
Signed-off-by: Wei Chen
---
 arch/arm64/include/asm/kvm_host.h       | 5 ++---
 arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f660f2695a62..12fd065b03c0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -459,6 +459,8 @@ struct kvm_host_data {
 		u64 pmscr_el1;
 		/* Self-hosted trace */
 		u64 trfcr_el1;
+		/* Values of trap registers for the host before guest entry. */
+		u64 mdcr_el2;
 	} host_debug_state;
 };
 
@@ -517,9 +519,6 @@ struct kvm_vcpu_arch {
 	u64 hcr_el2;
 	u64 mdcr_el2;
 
-	/* Values of trap registers for the host before guest entry. */
-	u64 mdcr_el2_host;
-
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index aca6eeee145d..102547d942e8 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -224,7 +224,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
 	}
 
-	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
+	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 
 	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
@@ -247,7 +247,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
-	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 	write_sysreg(0, hstr_el2);
 
 	if (kvm_arm_support_pmu_v3()) {
--
Gitee


From 3378e9ddccd2ffea100479e210b4aab4a758162d Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 1 Mar 2024 12:06:44 +0000
Subject: [PATCH 53/75] KVM: arm64: Exclude host_fpsimd_state pointer from
 kvm_vcpu_arch

ANBZ: #25252

commit 51e09b5572d665645ce394f94f24a7d6ec32bda9 upstream.

As the name of the field indicates, host_fpsimd_state is strictly a
host piece of data, and we reset this pointer on each PID change.

So let's move it where it belongs, and set it at load-time. Although
this happens slightly more often than strictly necessary, it is a
well-defined life-cycle which matches other pieces of data.
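Concretely, the hyp VA of the host's FP/SIMD state is now (re)installed
on every vcpu load, as the fpsimd.c hunk below does:

	*host_data_ptr(fpsimd_state) =
		kern_hyp_va(&current->thread.uw.fpsimd_state);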
Reviewed-by: Mark Brown
Signed-off-by: Marc Zyngier
Signed-off-by: Wei Chen
---
 arch/arm64/include/asm/kvm_host.h | 1 +
 arch/arm64/kvm/fpsimd.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 12fd065b03c0..32382c417798 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -447,6 +447,7 @@ struct kvm_cpu_context { */ struct kvm_host_data { struct kvm_cpu_context host_ctxt; + struct user_fpsimd_state *fpsimd_state; /* hyp VA */ /* * host_debug_state contains the host registers which are
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index e57db49ea468..b89d8f6eb8f4 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -87,6 +87,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) */ fpsimd_save_and_flush_cpu_state(); vcpu->arch.fp_state = FP_STATE_FREE; + *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state); } /*
-- Gitee

From 2d7461fb3f975a4ce4362a9e2fad74293c18b8ff Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 1 Mar 2024 17:42:31 +0000
Subject: [PATCH 54/75] KVM: arm64: Exclude FP ownership from kvm_vcpu_arch

ANBZ: #25252

commit 5294afdbf45aced5295fe5941c58b40c41c23800 upstream.

In retrospect, it is fairly obvious that the FP state ownership is only meaningful for a given CPU, and that locating this information in the vcpu was just a mistake. Move the ownership tracking into the host data structure, and rename it from fp_state to fp_owner, which is a better description (name suggested by Mark Brown).

Reviewed-by: Mark Brown
Signed-off-by: Marc Zyngier
Signed-off-by: Wei Chen
---
 arch/arm64/include/asm/kvm_emulate.h | 4 ++--
 arch/arm64/include/asm/kvm_host.h | 14 +++++++------
 arch/arm64/kvm/arm.c | 6 ------
 arch/arm64/kvm/fpsimd.c | 8 ++++----
 arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++--
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 2 --
 arch/arm64/kvm/hyp/nvhe/switch.c | 2 +-
 arch/arm64/kvm/hyp/vhe/switch.c | 2 +-
 8 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3d6725ff0bf6..c0cb2c174848 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -592,7 +592,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); if (!vcpu_has_sve(vcpu) || - (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)) + (*host_data_ptr(fp_owner) != FP_STATE_GUEST_OWNED)) val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN; if (cpus_have_final_cap(ARM64_SME)) val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
@@ -600,7 +600,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) val = CPTR_NVHE_EL2_RES1; if (vcpu_has_sve(vcpu) && - (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)) + (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)) val |= CPTR_EL2_TZ; if (cpus_have_final_cap(ARM64_SME)) val &= ~CPTR_EL2_TSM;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 32382c417798..d03e4dbf563b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -449,6 +449,13 @@ struct kvm_host_data { struct kvm_cpu_context host_ctxt; struct user_fpsimd_state *fpsimd_state; /* hyp VA */ + /* Ownership of the FP regs */ + enum { + FP_STATE_FREE, + FP_STATE_HOST_OWNED, + FP_STATE_GUEST_OWNED, + } fp_owner; + /* * host_debug_state contains the host registers which are * saved and
restored during world switches. @@ -523,13 +530,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Ownership of the FP regs */ - enum { - FP_STATE_FREE, - FP_STATE_HOST_OWNED, - FP_STATE_GUEST_OWNED, - } fp_state; - /* Configuration flags, set once and for all before the vcpu can run */ u8 cflags; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 8bc23366e406..d1ecde20d9d5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -372,12 +372,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; - /* - * Default value for the FP state, will be overloaded at load - * time if we support FP (pretty likely) - */ - vcpu->arch.fp_state = FP_STATE_FREE; - /* Set up the timer */ kvm_timer_vcpu_init(vcpu); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index b89d8f6eb8f4..c4437cb9a084 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -86,7 +86,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) * that PSTATE.{SM,ZA} == {0,0}. */ fpsimd_save_and_flush_cpu_state(); - vcpu->arch.fp_state = FP_STATE_FREE; + *host_data_ptr(fp_owner) = FP_STATE_FREE; *host_data_ptr(fpsimd_state) = kern_hyp_va(¤t->thread.uw.fpsimd_state); } @@ -100,7 +100,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu) { if (test_thread_flag(TIF_FOREIGN_FPSTATE)) - vcpu->arch.fp_state = FP_STATE_FREE; + *host_data_ptr(fp_owner) = FP_STATE_FREE; } /* @@ -116,7 +116,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) WARN_ON_ONCE(!irqs_disabled()); - if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) { + if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) { /* * Currently we do not support SME guests so SVCR is @@ -152,7 +152,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); - if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) { + if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) { /* * Flush (save and invalidate) the fpsimd/sve state so that if * the host tries to use fpsimd/sve, it's not using stale data diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 102547d942e8..d274244e1b44 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -42,7 +42,7 @@ extern struct kvm_exception_table_entry __stop___kvm_ex_table; /* Check whether the FP regs are owned by the guest */ static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu) { - return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED; + return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED; } /* Save the 32-bit only FPSIMD system register state */ @@ -418,7 +418,7 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) if (!(read_sysreg(hcr_el2) & HCR_RW)) write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2); - vcpu->arch.fp_state = FP_STATE_GUEST_OWNED; + *host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED; return true; } diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 350d1775a5ce..90c7621fa239 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -39,7 +39,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2; hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags; - hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state; hyp_vcpu->vcpu.arch.debug_ptr = 
kern_hyp_va(host_vcpu->arch.debug_ptr); @@ -62,7 +61,6 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault; host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags; - host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state; host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr; for (i = 0; i < hyp_cpu_if->used_lrs; ++i) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 1533b9e6fcfd..668a0c339289 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -387,7 +387,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_restore_state_nvhe(host_ctxt); - if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) + if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) __fpsimd_save_fpexc32(vcpu); __debug_switch_to_host(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index ba6069c886a6..f8425843e547 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -258,7 +258,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_restore_host_state_vhe(host_ctxt); - if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) + if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) __fpsimd_save_fpexc32(vcpu); __debug_switch_to_host(vcpu); -- Gitee From 40d3358ae63d6e2113b1071385ecc1e9451ef6bb Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Dec 2024 14:40:58 -0800 Subject: [PATCH 55/75] KVM: arm64: Drop MDSCR_EL1_DEBUG_MASK ANBZ: #25252 commit 8ca19c40c47d80af369c222662445bbf593666b1 upstream. Nothing is using this macro, get rid of it. Tested-by: James Clark Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20241219224116.3941496-2-oliver.upton@linux.dev Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/kvm/debug.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 8725291cb00a..e8ad96a911eb 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -16,11 +16,6 @@ #include "trace.h" -/* These are the bits of MDSCR_EL1 we may manipulate */ -#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \ - DBG_MDSCR_KDE | \ - DBG_MDSCR_MDE) - static DEFINE_PER_CPU(u64, mdcr_el2); /** -- Gitee From 6e1d7c8470d5b95afa8fad33b0871112b0795957 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Dec 2024 14:40:59 -0800 Subject: [PATCH 56/75] KVM: arm64: Get rid of __kvm_get_mdcr_el2() and related warts ANBZ: #25252 commit 2417218f2f234fd7880fb193ebf3ae5fcccfa29b upstream. KVM caches MDCR_EL2 on a per-CPU basis in order to preserve the configuration of MDCR_EL2.HPMN while running a guest. This is a bit gross, since we're relying on some baked configuration rather than the hardware definition of implemented counters. Discover the number of implemented counters by reading PMCR_EL0.N instead. This works because: - In VHE the kernel runs at EL2, and N always returns the number of counters implemented in hardware - In {n,h}VHE, the EL2 setup code programs MDCR_EL2.HPMN with the EL2 view of PMCR_EL0.N for the host Lastly, avoid traps under nested virtualization by saving PMCR_EL0.N in host data. 
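
The arithmetic being replaced is small enough to show in isolation. The sketch below reimplements FIELD_GET()/FIELD_PREP() locally and uses the architectural field positions (PMCR_EL0.N at bits [15:11], MDCR_EL2.HPMN at bits [4:0]); it illustrates the new derivation rather than quoting the kernel source:

#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_PMCR_N	(0x1full << 11)	/* PMCR_EL0.N, bits [15:11] */
#define MDCR_EL2_HPMN		(0x1full << 0)	/* MDCR_EL2.HPMN, bits [4:0] */

/* local stand-ins for the kernel's bitfield helpers */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) / ((mask) & ~((mask) << 1)))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) * ((mask) & ~((mask) << 1))) & (mask))

int main(void)
{
	uint64_t pmcr_el0 = 6ull << 11;	/* models a CPU implementing 6 counters */
	unsigned int nr = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr_el0);
	uint64_t mdcr = FIELD_PREP(MDCR_EL2_HPMN, nr);

	/* HPMN now comes from the hardware description, not a cached MDCR_EL2 */
	printf("nr_event_counters=%u, MDCR_EL2.HPMN field=%#llx\n",
	       nr, (unsigned long long)mdcr);
	return 0;
}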
Tested-by: James Clark Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20241219224116.3941496-3-oliver.upton@linux.dev Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_asm.h | 5 +---- arch/arm64/include/asm/kvm_host.h | 7 +++++-- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/debug.c | 29 +++++++++++------------------ arch/arm64/kvm/hyp/nvhe/debug-sr.c | 5 ----- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 6 ------ arch/arm64/kvm/hyp/vhe/debug-sr.c | 5 ----- 7 files changed, 18 insertions(+), 41 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 9e1d8ec62e27..508f3b61a695 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -52,8 +52,7 @@ enum __kvm_host_smccc_func { /* Hypercalls available only prior to pKVM finalisation */ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ - __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, - __KVM_HOST_SMCCC_FUNC___pkvm_init, + __KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1, __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping, __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector, __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, @@ -260,8 +259,6 @@ extern u64 __vgic_v3_read_vmcr(void); extern void __vgic_v3_write_vmcr(u32 vmcr); extern void __vgic_v3_init_lrs(void); -extern u64 __kvm_get_mdcr_el2(void); - #define __KVM_EXTABLE(from, to) \ " .pushsection __kvm_ex_table, \"a\"\n" \ " .align 3\n" \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d03e4dbf563b..170debcaa6f9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -460,7 +460,7 @@ struct kvm_host_data { * host_debug_state contains the host registers which are * saved and restored during world switches. */ - struct { + struct { /* {Break,watch}point registers */ struct kvm_guest_debug_arch regs; /* Statistical profiling extension */ @@ -470,6 +470,9 @@ struct kvm_host_data { /* Values of trap registers for the host before guest entry. 
*/ u64 mdcr_el2; } host_debug_state; + + /* Number of programmable event counters (PMCR_EL0.N) for this CPU */ + unsigned int nr_event_counters; }; struct kvm_host_psci_config { @@ -1135,7 +1138,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void) static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -void kvm_arm_init_debug(void); +void kvm_init_host_debug_data(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d1ecde20d9d5..e14f5f15d173 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1943,6 +1943,7 @@ static void cpu_set_hyp_vector(void) static void cpu_hyp_init_context(void) { kvm_init_host_cpu_context(host_data_ptr(host_ctxt)); + kvm_init_host_debug_data(); if (!is_kernel_in_hyp_mode()) cpu_init_hyp_mode(); @@ -1952,7 +1953,6 @@ static void cpu_hyp_init_context(void) static void cpu_hyp_init_features(void) { cpu_set_hyp_vector(); - kvm_arm_init_debug(); if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index e8ad96a911eb..968bdc299a4b 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -16,8 +16,6 @@ #include "trace.h" -static DEFINE_PER_CPU(u64, mdcr_el2); - /** * save/restore_guest_debug_regs * @@ -60,21 +58,6 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; } -/** - * kvm_arm_init_debug - grab what we need for debug - * - * Currently the sole task of this function is to retrieve the initial - * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has - * presumably been set-up by some knowledgeable bootcode. - * - * It is called once per-cpu during CPU hyp initialisation. 
- */ - -void kvm_arm_init_debug(void) -{ - __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2)); -} - /** * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value * @@ -94,7 +77,8 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK * to disable guest access to the profiling and trace buffers */ - vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; + vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN, + *host_data_ptr(nr_event_counters)); vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | MDCR_EL2_TPMS | MDCR_EL2_TTRF | @@ -337,3 +321,12 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu) vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE); vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE); } + +void kvm_init_host_debug_data(void) +{ + u64 dfr0 = read_sysreg(id_aa64dfr0_el1); + + if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0) + *host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N, + read_sysreg(pmcr_el0)); +} diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index d89eaeae1c2a..c8f657736228 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -106,8 +106,3 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu) { __debug_switch_to_host_common(vcpu); } - -u64 __kvm_get_mdcr_el2(void) -{ - return read_sysreg(mdcr_el2); -} diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 90c7621fa239..72d9edb7dd9b 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -191,11 +191,6 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt) __vgic_v3_init_lrs(); } -static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt) -{ - cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2(); -} - static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1); @@ -318,7 +313,6 @@ typedef void (*hcall_t)(struct kvm_cpu_context *); static const hcall_t host_hcall[] = { /* ___kvm_hyp_init */ - HANDLE_FUNC(__kvm_get_mdcr_el2), HANDLE_FUNC(__pkvm_init), HANDLE_FUNC(__pkvm_create_private_mapping), HANDLE_FUNC(__pkvm_cpu_set_vector), diff --git a/arch/arm64/kvm/hyp/vhe/debug-sr.c b/arch/arm64/kvm/hyp/vhe/debug-sr.c index 289689b2682d..0100339b09e0 100644 --- a/arch/arm64/kvm/hyp/vhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/vhe/debug-sr.c @@ -19,8 +19,3 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu) { __debug_switch_to_host_common(vcpu); } - -u64 __kvm_get_mdcr_el2(void) -{ - return read_sysreg(mdcr_el2); -} -- Gitee From ef16d705557e06db20b7e9880be6383c976a0f06 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Dec 2024 14:41:00 -0800 Subject: [PATCH 57/75] KVM: arm64: Track presence of SPE/TRBE in kvm_host_data instead of vCPU ANBZ: #25252 commit 38131c02a53ff691e4c496d65d2d087a5ed42bbf upstream. Add flags to kvm_host_data to track if SPE/TRBE is present + programmable on a per-CPU basis. Set the flags up at init rather than vcpu_load() as the programmability of these buffers is unlikely to change. 
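
The flag word follows the usual kernel bitops pattern. Below is a self-contained approximation, with C11 atomics standing in for set_bit()/test_bit() and a single global standing in for the per-CPU kvm_host_data; only the flag names are taken from the patch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { HAS_SPE, HAS_TRBE };

struct kvm_host_data {
	atomic_ulong flags;	/* one word of feature bits per CPU */
};

static struct kvm_host_data host_data;	/* per-CPU in the real code */

static void host_data_set_flag(int flag)
{
	atomic_fetch_or(&host_data.flags, 1ul << flag);
}

static bool host_data_test_flag(int flag)
{
	return atomic_load(&host_data.flags) & (1ul << flag);
}

int main(void)
{
	/* kvm_init_host_debug_data() decides this once, at CPU init time */
	host_data_set_flag(HAS_TRBE);

	/* the switch path then only pays for hardware that is present */
	if (host_data_test_flag(HAS_TRBE))
		printf("would save/flush TRBE around the guest run\n");
	return 0;
}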
Reviewed-by: James Clark Tested-by: James Clark Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20241219224116.3941496-4-oliver.upton@linux.dev Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 19 +++++++++------- arch/arm64/kvm/arm.c | 2 -- arch/arm64/kvm/debug.c | 36 ++++++++---------------------- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 8 +++---- 4 files changed, 24 insertions(+), 41 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 170debcaa6f9..f80253de214e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -446,6 +446,10 @@ struct kvm_cpu_context { * field. */ struct kvm_host_data { +#define KVM_HOST_DATA_FLAG_HAS_SPE 0 +#define KVM_HOST_DATA_FLAG_HAS_TRBE 1 + unsigned long flags; + struct kvm_cpu_context host_ctxt; struct user_fpsimd_state *fpsimd_state; /* hyp VA */ @@ -741,10 +745,6 @@ struct kvm_vcpu_arch { #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7) /* Guest debug is live */ #define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4)) -/* Save SPE context if active */ -#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5)) -/* Save TRBE context if active */ -#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6)) /* vcpu running in HYP context */ #define VCPU_HYP_CONTEXT __vcpu_single_flag(iflags, BIT(7)) @@ -1098,6 +1098,13 @@ extern struct kvm_host_data __percpu *kvm_host_data; DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); #endif +#define host_data_test_flag(flag) \ + (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))) +#define host_data_set_flag(flag) \ + set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) +#define host_data_clear_flag(flag) \ + clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) + /* * How we access per-CPU host data depends on the where we access it from, * and the mode we're in: @@ -1172,10 +1179,6 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) return (!has_vhe() && attr->exclude_host); } -/* Flags for host debug state */ -void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); - #if IS_ENABLED(CONFIG_KVM) void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u64 clr); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e14f5f15d173..b4eaeabccda6 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -459,7 +459,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_disable(vcpu); - kvm_arch_vcpu_load_debug_state_flags(vcpu); if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); @@ -470,7 +469,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) kvm_vcpu_put_sysregs_vhe(vcpu); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 968bdc299a4b..6e55eb1cca72 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -293,40 +293,22 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) } } -void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) +void kvm_init_host_debug_data(void) { - u64 dfr0; + u64 dfr0 = read_sysreg(id_aa64dfr0_el1); + + if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0) + 
*host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N, + read_sysreg(pmcr_el0)); - /* For VHE, there is nothing to do */ if (has_vhe()) return; - dfr0 = read_sysreg(id_aa64dfr0_el1); - /* - * If SPE is present on this CPU and is available at current EL, - * we may need to check if the host state needs to be saved. - */ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && - !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT))) - vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); + !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) + host_data_set_flag(HAS_SPE); - /* Check if we have TRBE implemented and available at the host */ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) - vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); -} - -void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu) -{ - vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE); - vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE); -} - -void kvm_init_host_debug_data(void) -{ - u64 dfr0 = read_sysreg(id_aa64dfr0_el1); - - if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0) - *host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N, - read_sysreg(pmcr_el0)); + host_data_set_flag(HAS_TRBE); } diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index c8f657736228..762db8db0466 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -82,10 +82,10 @@ static void __debug_restore_trace(u64 trfcr_el1) void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) + if (host_data_test_flag(HAS_SPE)) __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); /* Disable and flush Self-Hosted Trace generation */ - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) + if (host_data_test_flag(HAS_TRBE)) __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1)); } @@ -96,9 +96,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu) void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE)) + if (host_data_test_flag(HAS_SPE)) __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); - if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE)) + if (host_data_test_flag(HAS_TRBE)) __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1)); } -- Gitee From b26d779fd0641a5cadc4a82598fc0cf7ea8ef72c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Dec 2024 14:41:01 -0800 Subject: [PATCH 58/75] KVM: arm64: Move host SME/SVE tracking flags to host data ANBZ: #25252 commit d381e53384a69e35aac417cd6e66afc6c8c11583 upstream. The SME/SVE state tracking flags have no business in the vCPU. Move them to kvm_host_data. Tested-by: James Clark Signed-off-by: Oliver Upton Link: https://lore.kernel.org/r/20241219224116.3941496-5-oliver.upton@linux.dev Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f80253de214e..854c2ea30200 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -446,8 +446,10 @@ struct kvm_cpu_context { * field. 
*/ struct kvm_host_data { -#define KVM_HOST_DATA_FLAG_HAS_SPE 0 -#define KVM_HOST_DATA_FLAG_HAS_TRBE 1 +#define KVM_HOST_DATA_FLAG_HAS_SPE 0 +#define KVM_HOST_DATA_FLAG_HAS_TRBE 1 +#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2 +#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3 unsigned long flags; struct kvm_cpu_context host_ctxt;
@@ -749,17 +751,17 @@ struct kvm_vcpu_arch { #define VCPU_HYP_CONTEXT __vcpu_single_flag(iflags, BIT(7)) /* Physical CPU not in supported_cpus */ -#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2)) +#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0)) /* WFIT instruction trapped */ -#define IN_WFIT __vcpu_single_flag(sflags, BIT(3)) +#define IN_WFIT __vcpu_single_flag(sflags, BIT(1)) /* vcpu system registers loaded on physical CPU */ -#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4)) +#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2)) /* Software step state is Active-pending */ -#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5)) +#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3)) /* PMUSERENR for the guest EL0 is on physical CPU */ -#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6)) +#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(4)) /* WFI instruction trapped */ -#define IN_WFI __vcpu_single_flag(sflags, BIT(7)) +#define IN_WFI __vcpu_single_flag(sflags, BIT(5)) /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
-- Gitee

From 28650d71ffb8962b56c0eb4988ce8853481b4010 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 13 Jan 2025 15:19:52 +0000
Subject: [PATCH 59/75] KVM: arm64: Fix selftests after sysreg field name update

ANBZ: #25252

commit 9fb4267a759ce50e633a56df6dfdb32bafc24203 upstream.

Fix KVM selftests that check for EL0's 64bit-ness, and use a now removed definition. Kindly point them at the new one.

Reported-by: Mark Brown
Signed-off-by: Marc Zyngier
[ Drop the changes for aarch64/set_id_regs. ]
Signed-off-by: Wei Chen
---
 tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
index 8e5bd07a3727..bd182be48c1b 100644
--- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
@@ -147,7 +147,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); - return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY; + return el0 == ID_AA64PFR0_EL1_EL0_IMP; } int main(void)
-- Gitee

From 525953333b5f741cce7cea16d762e3c67a58bcf4 Mon Sep 17 00:00:00 2001
From: James Clark
Date: Mon, 6 Jan 2025 14:24:37 +0000
Subject: [PATCH 60/75] tools: arm64: Update sysreg.h header files

ANBZ: #25252

commit 38138762faffeb923d9f49efbcc09884f1530786 upstream.

Created with the following:

cp include/linux/kasan-tags.h tools/include/linux/
cp arch/arm64/include/asm/sysreg.h tools/arch/arm64/include/asm/

Update the tools copy of sysreg.h so that the next commit to add a new register doesn't have unrelated changes in it. Because the new version of sysreg.h includes kasan-tags.h, that file also now needs to be copied into tools.
Acked-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: James Clark Signed-off-by: James Clark Link: https://lore.kernel.org/r/20250106142446.628923-3-james.clark@linaro.org Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- tools/arch/arm64/include/asm/sysreg.h | 398 +++++++++++++++++++++++++- tools/include/linux/kasan-tags.h | 15 + 2 files changed, 405 insertions(+), 8 deletions(-) create mode 100644 tools/include/linux/kasan-tags.h diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index ccc13e991376..b35396d3fff1 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -11,6 +11,7 @@ #include #include +#include #include @@ -108,6 +109,9 @@ #define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) #define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x)) +/* Register-based PAN access, for save/restore purposes */ +#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3) + #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -123,6 +127,37 @@ #define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) #define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) +#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0) +#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0) +#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1) + +#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1) +#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3) +#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5) + +#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1) +#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3) +#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5) + +#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1) + +#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1) +#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3) +#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5) + +#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1) +#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3) +#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5) + +#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1) +#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3) +#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5) + +/* Data cache zero operations */ +#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1) +#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3) +#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4) + /* * Automatically generated definitions for system registers, the * manual encodings below are in the process of being converted to @@ -162,6 +197,84 @@ #define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0) #define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) +#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0)) +#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0) +#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1)) +#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1) +#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2)) +#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2) +#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2) + +#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0) +#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1) +#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0) + +#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3) +#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3))) +#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3))) +#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6) +#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0) +#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0) +#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0) +#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2) +#define 
SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2) +#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0) +#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6) +#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6) +#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5) +#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5) +#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5) +#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0) +#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6) +#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7) +#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0) +#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0) +#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4) +#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7) +#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6) +#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6) +#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6) +#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6) +#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7) +#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7) +#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7) +#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7) +#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7) +#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7) +#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7) +#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6) +#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6) +#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7) +#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1) +#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4) +#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0) +#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1) +#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4))) +#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0) +#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4) +#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4) +#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4) +#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2) +#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2) +#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3) +#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0) +#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0) +#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0) +#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1) +#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0) +#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2) +#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2) +#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2) +#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2) +#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2) +#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2) +#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1) + +/* ETM */ +#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4) + +#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0) + #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) @@ -202,15 +315,38 @@ #define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1) #define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2) #define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) +#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4) +#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5) +#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6) #define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) #define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) +#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2) +#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3) #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) +/* When PAR_EL1.F == 1 */ #define SYS_PAR_EL1_FST GENMASK(6, 1) 
+#define SYS_PAR_EL1_PTW BIT(8) +#define SYS_PAR_EL1_S BIT(9) +#define SYS_PAR_EL1_AssuredOnly BIT(12) +#define SYS_PAR_EL1_TopLevel BIT(13) +#define SYS_PAR_EL1_Overlay BIT(14) +#define SYS_PAR_EL1_DirtyBit BIT(15) +#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48) +#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16)) +#define SYS_PAR_EL1_RES1 BIT(11) +/* When PAR_EL1.F == 0 */ +#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7) +#define SYS_PAR_EL1_NS BIT(9) +#define SYS_PAR_EL1_F0_IMPDEF BIT(10) +#define SYS_PAR_EL1_NSE BIT(11) +#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12) +#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56) +#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52)) /*** Statistical Profiling Extension ***/ #define PMSEVFR_EL1_RES0_IMP \ @@ -274,6 +410,8 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) +#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5) + #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) @@ -286,7 +424,6 @@ #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) #define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) #define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) -#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) #define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) #define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) #define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0) @@ -369,6 +506,7 @@ #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) #define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1) +#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3) #define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0) #define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1) #define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2) @@ -382,12 +520,15 @@ #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) -#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) -#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) +#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) #define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0) +#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0) +#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1) +#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2) +#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) #define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0) #define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1) @@ -449,24 +590,49 @@ #define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1) #define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2) +#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7) + +#define __AMEV_op2(m) (m & 0x7) +#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3)) +#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m) +#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m) #define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3) #define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) +#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0) +#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1) +#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2) +#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0) +#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1) +#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2) /* VHE encodings for architectural EL0/1 system registers */ +#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0) #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) 
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) +#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3) +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1) +#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) +#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3) #define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) #define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) +#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) +#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) +#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) +#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) @@ -477,6 +643,183 @@ #define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0) +/* AT instructions */ +#define AT_Op0 1 +#define AT_CRn 7 + +#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0) +#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1) +#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2) +#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3) +#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0) +#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1) +#define OP_AT_S1E1A sys_insn(AT_Op0, 0, AT_CRn, 9, 2) +#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0) +#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1) +#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4) +#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5) +#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6) +#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7) +#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2) + +/* TLBI instructions */ +#define TLBI_Op0 1 + +#define TLBI_Op1_EL1 0 /* Accessible from EL1 or higher */ +#define TLBI_Op1_EL2 4 /* Accessible from EL2 or higher */ + +#define TLBI_CRn_XS 8 /* Extra Slow (the common one) */ +#define TLBI_CRn_nXS 9 /* not Extra Slow (which nobody uses)*/ + +#define TLBI_CRm_IPAIS 0 /* S2 Inner-Shareable */ +#define TLBI_CRm_nROS 1 /* non-Range, Outer-Sharable */ +#define TLBI_CRm_RIS 2 /* Range, Inner-Sharable */ +#define TLBI_CRm_nRIS 3 /* non-Range, Inner-Sharable */ +#define TLBI_CRm_IPAONS 4 /* S2 Outer and Non-Shareable */ +#define TLBI_CRm_ROS 5 /* Range, Outer-Sharable */ +#define TLBI_CRm_RNS 6 /* Range, Non-Sharable */ +#define TLBI_CRm_nRNS 7 /* non-Range, Non-Sharable */ + +#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0) +#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1) +#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2) +#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3) +#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5) +#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7) +#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1) +#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3) +#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5) +#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7) +#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0) +#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1) +#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2) +#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3) +#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5) +#define 
OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7) +#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1) +#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3) +#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5) +#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7) +#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1) +#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3) +#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5) +#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7) +#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0) +#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1) +#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2) +#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3) +#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5) +#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7) +#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0) +#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1) +#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2) +#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3) +#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5) +#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7) +#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1) +#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3) +#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5) +#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7) +#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0) +#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1) +#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2) +#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3) +#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5) +#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7) +#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1) +#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3) +#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5) +#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7) +#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1) +#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3) +#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5) +#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7) +#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0) +#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1) +#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2) +#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3) +#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5) +#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7) +#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1) +#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2) +#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5) +#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6) +#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0) +#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1) +#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4) +#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5) +#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6) +#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1) +#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5) +#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0) +#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1) +#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4) +#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5) +#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6) +#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0) +#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1) +#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2) +#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3) +#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4) +#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5) +#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6) +#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7) +#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1) +#define 
OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5) +#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1) +#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5) +#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0) +#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1) +#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4) +#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5) +#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6) +#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1) +#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2) +#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5) +#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6) +#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0) +#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1) +#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4) +#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5) +#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6) +#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1) +#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5) +#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0) +#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1) +#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4) +#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5) +#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6) +#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0) +#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1) +#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2) +#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3) +#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4) +#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5) +#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6) +#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7) +#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1) +#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5) +#define OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1) +#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5) +#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0) +#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1) +#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4) +#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5) +#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6) + +/* Misc instructions */ +#define OP_GCSPUSHX sys_insn(1, 0, 7, 7, 4) +#define OP_GCSPOPCX sys_insn(1, 0, 7, 7, 5) +#define OP_GCSPOPX sys_insn(1, 0, 7, 7, 6) +#define OP_GCSPUSHM sys_insn(1, 3, 7, 7, 0) + +#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4) +#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5) +#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4) +#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5) +#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6) +#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7) + /* Common SCTLR_ELx flags. 
*/ #define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) @@ -555,16 +898,14 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64pfr0 */ -#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 - /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_EL1_TGRAN16_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf #define ARM64_MIN_PARANGE_BITS 32 @@ -572,6 +913,7 @@ #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 #ifdef CONFIG_ARM64_PA_BITS_52 @@ -582,11 +924,13 @@ #if defined(CONFIG_ARM64_4K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT @@ -610,6 +954,19 @@ #define SYS_GCR_EL1_RRND (BIT(16)) #define SYS_GCR_EL1_EXCL_MASK 0xffffUL +#ifdef CONFIG_KASAN_HW_TAGS +/* + * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it + * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF. + */ +#define __MTE_TAG_MIN (KASAN_TAG_MIN & 0xf) +#define __MTE_TAG_MAX (KASAN_TAG_MAX & 0xf) +#define __MTE_TAG_INCL GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN) +#define KERNEL_GCR_EL1_EXCL (SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL) +#else +#define KERNEL_GCR_EL1_EXCL SYS_GCR_EL1_EXCL_MASK +#endif + #define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL) /* RGSR_EL1 Definitions */ @@ -716,6 +1073,22 @@ #define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4)) +/* + * Permission Overlay Extension (POE) permission encodings. + */ +#define POE_NONE UL(0x0) +#define POE_R UL(0x1) +#define POE_X UL(0x2) +#define POE_RX UL(0x3) +#define POE_W UL(0x4) +#define POE_RW UL(0x5) +#define POE_XW UL(0x6) +#define POE_RXW UL(0x7) +#define POE_MASK UL(0xf) + +/* Initial value for Permission Overlay Extension for EL0 */ +#define POR_EL0_INIT POE_RXW + #define ARM64_FEATURE_FIELD_BITS 4 /* Defined for compatibility only, do not add new users. */ @@ -789,15 +1162,21 @@ /* * For registers without architectural names, or simply unsupported by * GAS. + * + * __check_r forces warnings to be generated by the compiler when + * evaluating r which wouldn't normally happen due to being passed to + * the assembler via __stringify(r). 
*/ #define read_sysreg_s(r) ({ \ u64 __val; \ + u32 __maybe_unused __check_r = (u32)(r); \ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \ __val; \ }) #define write_sysreg_s(v, r) do { \ u64 __val = (u64)(v); \ + u32 __maybe_unused __check_r = (u32)(r); \ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ } while (0) @@ -827,6 +1206,8 @@ par; \ }) +#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val + #define SYS_FIELD_GET(reg, field, val) \ FIELD_GET(reg##_##field##_MASK, val) @@ -834,7 +1215,8 @@ FIELD_PREP(reg##_##field##_MASK, val) #define SYS_FIELD_PREP_ENUM(reg, field, val) \ - FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + FIELD_PREP(reg##_##field##_MASK, \ + SYS_FIELD_VALUE(reg, field, val)) #endif diff --git a/tools/include/linux/kasan-tags.h b/tools/include/linux/kasan-tags.h new file mode 100644 index 000000000000..4f85f562512c --- /dev/null +++ b/tools/include/linux/kasan-tags.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_TAGS_H +#define _LINUX_KASAN_TAGS_H + +#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ +#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ +#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ + +#ifdef CONFIG_KASAN_HW_TAGS +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ +#else +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ +#endif + +#endif /* LINUX_KASAN_TAGS_H */ -- Gitee From 6d3dcfea1b8fb9d0dc3a957922a73449f974c67c Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 6 Jan 2025 14:24:38 +0000 Subject: [PATCH 61/75] arm64/sysreg/tools: Move TRFCR definitions to sysreg ANBZ: #25252 commit c382ee674c8b5005798606267d660cf995218b18 upstream. Convert TRFCR to automatic generation. Add separate definitions for ELx and EL2 as TRFCR_EL1 doesn't have CX. This also mirrors the previous definition so no code change is required. Also add TRFCR_EL12 which will start to be used in a later commit. Unfortunately, to avoid breaking the Perf build with duplicate definition errors, the tools copy of the sysreg.h header needs to be updated at the same time rather than the usual second commit. This is because the generated version of sysreg (arch/arm64/include/generated/asm/sysreg-defs.h), is currently shared and tools/ does not have its own copy. 
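
For reference, the definitions the sysreg tool generates from a description like the one added here are plain masks and enum values. The sketch below mirrors the TRFCR_ELx layout from the patch (TS at bits [6:5], ExTRE bit 1, E0TRE bit 0) and models SYS_FIELD_PREP_ENUM() with a local FIELD_PREP(); it is illustrative only, not the generated header:

#include <stdint.h>
#include <stdio.h>

/* what the generator would emit for the fields above (names abbreviated) */
#define TRFCR_ELx_TS_MASK	(0x3ull << 5)
#define TRFCR_ELx_TS_PHYSICAL	3
#define TRFCR_ELx_ExTRE		(1ull << 1)
#define TRFCR_ELx_E0TRE		(1ull << 0)

/* local stand-in for the kernel's FIELD_PREP() */
#define FIELD_PREP(mask, val)	(((uint64_t)(val) * ((mask) & ~((mask) << 1))) & (mask))

int main(void)
{
	/* enable EL1 and EL0 trace with physical timestamps */
	uint64_t trfcr = FIELD_PREP(TRFCR_ELx_TS_MASK, TRFCR_ELx_TS_PHYSICAL) |
			 TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE;

	printf("TRFCR value = %#llx\n", (unsigned long long)trfcr);	/* 0x63 */
	return 0;
}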
Reviewed-by: Mark Brown Signed-off-by: James Clark Signed-off-by: James Clark Link: https://lore.kernel.org/r/20250106142446.628923-4-james.clark@linaro.org Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/sysreg.h | 12 --------- arch/arm64/tools/sysreg | 36 +++++++++++++++++++++++++++ tools/arch/arm64/include/asm/sysreg.h | 12 --------- 3 files changed, 36 insertions(+), 24 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 47be326abec1..a546d675c94a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -278,8 +278,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) - #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -495,7 +493,6 @@ #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) -#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) @@ -911,15 +908,6 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) -#define TRFCR_ELx_TS_SHIFT 5 -#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_EL2_CX BIT(3) -#define TRFCR_ELx_ExTRE BIT(1) -#define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index caf16917445a..106c84e72a2b 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1836,6 +1836,22 @@ Sysreg CPACR_EL1 3 0 1 0 2 Fields CPACR_ELx EndSysreg +SysregFields TRFCR_ELx +Res0 63:7 +UnsignedEnum 6:5 TS + 0b0001 VIRTUAL + 0b0010 GUEST_PHYSICAL + 0b0011 PHYSICAL +EndEnum +Res0 4:2 +Field 1 ExTRE +Field 0 E0TRE +EndSysregFields + +Sysreg TRFCR_EL1 3 0 1 2 1 +Fields TRFCR_ELx +EndSysreg + Sysreg SMPRI_EL1 3 0 1 2 4 Res0 63:4 Field 3:0 PRIORITY @@ -2373,6 +2389,22 @@ Field 1 ICIALLU Field 0 ICIALLUIS EndSysreg +Sysreg TRFCR_EL2 3 4 1 2 1 +Res0 63:7 +UnsignedEnum 6:5 TS + 0b0000 USE_TRFCR_EL1_TS + 0b0001 VIRTUAL + 0b0010 GUEST_PHYSICAL + 0b0011 PHYSICAL +EndEnum +Res0 4 +Field 3 CX +Res0 2 +Field 1 E2TRE +Field 0 E0HTRE +EndSysreg + + Sysreg HDFGRTR_EL2 3 4 3 1 4 Field 63 PMBIDR_EL1 Field 62 nPMSNEVFR_EL1 @@ -2610,6 +2642,10 @@ Sysreg ZCR_EL12 3 5 1 2 0 Fields ZCR_ELx EndSysreg +Sysreg TRFCR_EL12 3 5 1 2 1 +Fields TRFCR_ELx +EndSysreg + Sysreg SMCR_EL12 3 5 1 2 6 Fields SMCR_ELx EndSysreg diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index b35396d3fff1..2817a14ef681 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -283,8 +283,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) - #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -519,7 +517,6 @@ #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) -#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 
3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) @@ -983,15 +980,6 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) -#define TRFCR_ELx_TS_SHIFT 5 -#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) -#define TRFCR_EL2_CX BIT(3) -#define TRFCR_ELx_ExTRE BIT(1) -#define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) -- Gitee From 4b93d50d45da877efab8aaeddb80fff9c5d4a987 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 6 Jan 2025 14:24:39 +0000 Subject: [PATCH 62/75] coresight: trbe: Remove redundant disable call ANBZ: #25252 commit a2b579c41fe9c295804abd167751f9fdc73c7006 upstream. trbe_drain_and_disable_local() just clears TRBLIMITR and drains. TRBLIMITR is already cleared on the next line after this call, so replace it with only drain. This is so we can make a kvm call that has a preempt enabled warning from set_trbe_disabled() in the next commit, where trbe_reset_local() is called from a preemptible hotplug path. Signed-off-by: James Clark Link: https://lore.kernel.org/r/20250106142446.628923-5-james.clark@linaro.org Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- drivers/hwtracing/coresight/coresight-trbe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index a3954be7b1f3..9ff2ada74952 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -252,8 +252,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata) static void trbe_reset_local(struct trbe_cpudata *cpudata) { - trbe_drain_and_disable_local(cpudata); write_sysreg_s(0, SYS_TRBLIMITR_EL1); + trbe_drain_buffer(); write_sysreg_s(0, SYS_TRBPTR_EL1); write_sysreg_s(0, SYS_TRBBASER_EL1); write_sysreg_s(0, SYS_TRBSR_EL1); -- Gitee From ede43d4162f8d470453c36fcf3816af27d1c3c8f Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 6 Jan 2025 14:24:40 +0000 Subject: [PATCH 63/75] KVM: arm64: coresight: Give TRBE enabled state to KVM ANBZ: #25252 commit a665e3bc88081dd65642d83fc22a1abdb6a901bc upstream. Currently in nVHE, KVM has to check if TRBE is enabled on every guest switch even if it was never used. Because it's a debug feature and is more likely to not be used than used, give KVM the TRBE buffer status to allow a much simpler and faster do-nothing path in the hyp. Protected mode now disables trace regardless of TRBE (because trfcr_while_in_guest is always 0), which was not previously done. However, it continues to flush whenever the buffer is enabled regardless of the filter status. This avoids the hypothetical case of a host that had disabled the filter but not flushed which would arise if only doing the flush when the filter was enabled. 
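[ For illustration: with this change, the hyp-side check collapses to a per-CPU flag test instead of a TRBLIMITR_EL1 sysreg read on every guest switch. A minimal sketch of that logic, mirroring __trace_needs_switch() in the debug-sr.c hunk below (sketch only, not the patch itself):

	static bool trace_needs_switch_sketch(void)
	{
		/* Flags maintained by the TRBE driver and trace code */
		return host_data_test_flag(TRBE_ENABLED) ||
		       host_data_test_flag(EL1_TRACING_CONFIGURED);
	}
]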
Signed-off-by: James Clark Link: https://lore.kernel.org/r/20250106142446.628923-6-james.clark@linaro.org Signed-off-by: Marc Zyngier Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 9 +++ arch/arm64/kvm/debug.c | 32 +++++++++- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 63 ++++++++++++-------- drivers/hwtracing/coresight/coresight-trbe.c | 3 + 4 files changed, 79 insertions(+), 28 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 854c2ea30200..a06bb38d8008 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -450,6 +450,8 @@ struct kvm_host_data { #define KVM_HOST_DATA_FLAG_HAS_TRBE 1 #define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2 #define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3 +#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 +#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 unsigned long flags; struct kvm_cpu_context host_ctxt; @@ -477,6 +479,9 @@ struct kvm_host_data { u64 mdcr_el2; } host_debug_state; + /* Guest trace filter value */ + u64 trfcr_while_in_guest; + /* Number of programmable event counters (PMCR_EL0.N) for this CPU */ unsigned int nr_event_counters; }; @@ -1185,6 +1190,8 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u64 clr); bool kvm_set_pmuserenr(u64 val); +void kvm_enable_trbe(void); +void kvm_disable_trbe(void); #else static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u64 clr) {} @@ -1192,6 +1199,8 @@ static inline bool kvm_set_pmuserenr(u64 val) { return false; } +static inline void kvm_enable_trbe(void) {} +static inline void kvm_disable_trbe(void) {} #endif void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 6e55eb1cca72..28eb9f17ea7a 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -308,7 +308,33 @@ void kvm_init_host_debug_data(void) !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) host_data_set_flag(HAS_SPE); - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && - !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) - host_data_set_flag(HAS_TRBE); + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) { + /* Force disable trace in protected mode in case of no TRBE */ + if (is_protected_kvm_enabled()) + host_data_set_flag(EL1_TRACING_CONFIGURED); + + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && + !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P)) + host_data_set_flag(HAS_TRBE); + } +} + +void kvm_enable_trbe(void) +{ + if (has_vhe() || is_protected_kvm_enabled() || + WARN_ON_ONCE(preemptible())) + return; + + host_data_set_flag(TRBE_ENABLED); +} +EXPORT_SYMBOL_GPL(kvm_enable_trbe); + +void kvm_disable_trbe(void) +{ + if (has_vhe() || is_protected_kvm_enabled() || + WARN_ON_ONCE(preemptible())) + return; + + host_data_clear_flag(TRBE_ENABLED); } +EXPORT_SYMBOL_GPL(kvm_disable_trbe); diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index 762db8db0466..dd4184d81611 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -51,32 +51,45 @@ static void __debug_restore_spe(u64 pmscr_el1) write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1); } -static void __debug_save_trace(u64 *trfcr_el1) +static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr) { - 
*trfcr_el1 = 0; + *saved_trfcr = read_sysreg_el1(SYS_TRFCR); + write_sysreg_el1(new_trfcr, SYS_TRFCR); +} - /* Check if the TRBE is enabled */ - if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E)) - return; - /* - * Prohibit trace generation while we are in guest. - * Since access to TRFCR_EL1 is trapped, the guest can't - * modify the filtering set by the host. - */ - *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1); - write_sysreg_s(0, SYS_TRFCR_EL1); - isb(); - /* Drain the trace buffer to memory */ - tsb_csync(); +static bool __trace_needs_drain(void) +{ + if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE)) + return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E; + + return host_data_test_flag(TRBE_ENABLED); } -static void __debug_restore_trace(u64 trfcr_el1) +static bool __trace_needs_switch(void) { - if (!trfcr_el1) - return; + return host_data_test_flag(TRBE_ENABLED) || + host_data_test_flag(EL1_TRACING_CONFIGURED); +} + +static void __trace_switch_to_guest(void) +{ + /* Unsupported with TRBE so disable */ + if (host_data_test_flag(TRBE_ENABLED)) + *host_data_ptr(trfcr_while_in_guest) = 0; + + __trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1), + *host_data_ptr(trfcr_while_in_guest)); - /* Restore trace filter controls */ - write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1); + if (__trace_needs_drain()) { + isb(); + tsb_csync(); + } +} + +static void __trace_switch_to_host(void) +{ + __trace_do_switch(host_data_ptr(trfcr_while_in_guest), + *host_data_ptr(host_debug_state.trfcr_el1)); } void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) @@ -84,9 +97,9 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) /* Disable and flush SPE data generation */ if (host_data_test_flag(HAS_SPE)) __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); - /* Disable and flush Self-Hosted Trace generation */ - if (host_data_test_flag(HAS_TRBE)) - __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1)); + + if (__trace_needs_switch()) + __trace_switch_to_guest(); } void __debug_switch_to_guest(struct kvm_vcpu *vcpu) @@ -98,8 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { if (host_data_test_flag(HAS_SPE)) __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); - if (host_data_test_flag(HAS_TRBE)) - __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1)); + if (__trace_needs_switch()) + __trace_switch_to_host(); } void __debug_switch_to_host(struct kvm_vcpu *vcpu) diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 9ff2ada74952..ad6ee763c4a8 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -17,6 +17,7 @@ #include #include +#include #include "coresight-self-hosted-trace.h" #include "coresight-trbe.h" @@ -220,6 +221,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr) */ trblimitr |= TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_enable_trbe(); /* Synchronize the TRBE enable event */ isb(); @@ -238,6 +240,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata) */ trblimitr &= ~TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_disable_trbe(); if (trbe_needs_drain_after_disable(cpudata)) trbe_drain_buffer(); -- Gitee From 0c4ee933e0f24e68a89bf3703fd6d6f538f2e85e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 14 May 2025 11:34:49 +0100 Subject: [PATCH 64/75] KVM: arm64: nv: Don't adjust PSTATE.M when L2 is 
nesting ANBZ: #25252 commit bd914a981446df475be27ef9c5e86961e6f39c5a upstream. We currently check for HCR_EL2.NV being set to decide whether we need to repaint PSTATE.M to say EL2 instead of EL1 on exit. However, this isn't correct when L2 is itself a hypervisor, and L1 has set its own HCR_EL2.NV. That's because we "flatten" the state and inherit parts of the guest's own setup. In that case, we shouldn't adjust PSTATE.M, as this is really EL1 for both us and the guest. Instead of trying to work out how we ended up with HCR_EL2.NV being set by introspecting both the host and guest states, use a per-CPU flag to remember the context (HYP or not), and use that information to decide whether PSTATE needs tweaking. Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20250514103501.2225951-7-maz@kernel.org Signed-off-by: Marc Zyngier [KVM: arm64: nv: Drop the changes for compute_hcr ] Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/hyp/vhe/switch.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a06bb38d8008..a02ce026b8dc 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -452,6 +452,7 @@ struct kvm_host_data { #define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3 #define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 +#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6 unsigned long flags; struct kvm_cpu_context host_ctxt; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index f8425843e547..b2f0f1d5f095 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -183,9 +183,12 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) /* * If we were in HYP context on entry, adjust the PSTATE view - * so that the usual helpers work correctly. + * so that the usual helpers work correctly. This enforces our + * invariant that the guest's HYP context status is preserved + * across a run. */ - if (unlikely(vcpu_get_flag(vcpu, VCPU_HYP_CONTEXT))) { + if (vcpu_has_nv(vcpu) && + unlikely(host_data_test_flag(VCPU_IN_HYP_CONTEXT))) { u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT); switch (mode) { @@ -201,6 +204,10 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) *vcpu_cpsr(vcpu) |= mode; } + /* Apply extreme paranoia! */ + BUG_ON(vcpu_has_nv(vcpu) && + !!host_data_test_flag(VCPU_IN_HYP_CONTEXT) != is_hyp_ctxt(vcpu)); + return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers); } -- Gitee From 858715629ec3dede1aad775a109b97fc69a60821 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 14 May 2025 11:34:53 +0100 Subject: [PATCH 65/75] KVM: arm64: nv: Handle mapping of VNCR_EL2 at EL2 ANBZ: #25252 commit 2a359e072596fcb2e9e85017a865e3618a2fe5b5 upstream. Now that we can handle faults triggered through VNCR_EL2, we need to map the corresponding page at EL2. But where, you'll ask? Since each CPU in the system can run a vcpu, we need a per-CPU mapping. For that, we carve a NR_CPUS range in the fixmap, giving us a per-CPU va at which to map the guest's VNCR's page. The mapping occurs both on vcpu load and on the back of a fault, both generating a request that will take care of the mapping. That mapping will also get dropped on vcpu put. Yes, this is a bit heavy handed, but it is simple.
Eventually, we may want to have a per-VM, per-CPU mapping, which would avoid all the TLBI overhead. Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20250514103501.2225951-11-maz@kernel.org Signed-off-by: Marc Zyngier [ KVM: arm64: nv: Drop changes for nested.c ] Signed-off-by: Wei Chen --- arch/arm64/include/asm/fixmap.h | 6 ++++++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_nested.h | 7 +++++++ 3 files changed, 14 insertions(+) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 58c294a96676..ae7eaca910de 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -48,6 +48,12 @@ enum fixed_addresses { FIX_EARLYCON_MEM_BASE, FIX_TEXT_POKE0, +#ifdef CONFIG_KVM + /* One slot per CPU, mapping the guest's VNCR page at EL2. */ + FIX_VNCR_END, + FIX_VNCR = FIX_VNCR_END + NR_CPUS, +#endif + #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ FIX_APEI_GHES_IRQ, diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a02ce026b8dc..cc03e86cafbe 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -453,6 +453,7 @@ struct kvm_host_data { #define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 #define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6 +#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7 unsigned long flags; struct kvm_cpu_context host_ctxt; diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index fa23cc9c2adc..b59d402cf88f 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -19,4 +19,11 @@ struct sys_reg_desc; void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p, const struct sys_reg_desc *r); +#define vncr_fixmap(c) \ + ({ \ + u32 __c = (c); \ + BUG_ON(__c >= NR_CPUS); \ + (FIX_VNCR - __c); \ + }) + #endif /* __ARM64_KVM_NESTED_H */ -- Gitee From 61118a85e80c84d9c921df4c44ff0aea6c3a9626 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Mar 2025 10:26:57 -0700 Subject: [PATCH 66/75] perf: Supply task information to sched_task() ANBZ: #25252 commit d57e94f5b891925e4f2796266eba31edd5a01903 upstream. To save/restore LBR call stack data in system-wide mode, the task_struct information is required. Extend the parameters of sched_task() to supply task_struct information. On schedule-in, the LBR call stack data for the new task will be restored. On schedule-out, the LBR call stack data for the old task will be saved. Only the required task_struct information needs to be passed.
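[ For illustration, a minimal sketch of a PMU callback using the extended signature; the callback and helper names here are hypothetical, and the real prototype is in the include/linux/perf_event.h hunk below:

	static void example_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
					   struct task_struct *task, bool sched_in)
	{
		/* task is the incoming task on sched-in, the outgoing one on sched-out */
		if (sched_in)
			example_restore_lbr_callstack(task);	/* hypothetical helper */
		else
			example_save_lbr_callstack(task);	/* hypothetical helper */
	}
]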
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250314172700.438923-4-kan.liang@linux.intel.com Signed-off-by: Wei Chen --- arch/powerpc/perf/core-book3s.c | 8 ++++++-- arch/s390/kernel/perf_pai_crypto.c | 3 ++- arch/s390/kernel/perf_pai_ext.c | 3 ++- arch/x86/events/amd/brs.c | 3 ++- arch/x86/events/amd/lbr.c | 3 ++- arch/x86/events/core.c | 5 +++-- arch/x86/events/intel/core.c | 4 ++-- arch/x86/events/intel/lbr.c | 3 ++- arch/x86/events/perf_event.h | 14 +++++++++----- include/linux/perf_event.h | 2 +- kernel/events/core.c | 20 +++++++++++--------- 11 files changed, 42 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index d28ba2f45ee2..bc173628238b 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {} -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {} +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) +{ +} static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ @@ -451,7 +454,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event) /* Called from ctxsw to prevent one process's branch entries to * mingle with the other process's entries during context switch. */ -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { if (!ppmu->bhrb_nr) return; diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 044fba8332b2..b24ee5fbf958 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -378,7 +378,8 @@ static int paicrypt_push_sample(void) /* Called on schedule-in and schedule-out. No access to event structure, * but for sampling only event CRYPTO_ALL is allowed. */ -static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { /* We started with a clean page on event installation. So read out * results on schedule_out and if page was dirty, clear values. diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index 089bd3104b39..3b6f18675ab4 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -467,7 +467,8 @@ static int paiext_push_sample(void) /* Called on schedule-in and schedule-out. No access to event structure, * but for sampling only event NNPA_ALL is allowed. */ -static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { /* We started with a clean page on event installation. So read out * results on schedule_out and if page was dirty, clear values. 
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c index ed308719236c..f6513fd2d9ec 100644 --- a/arch/x86/events/amd/brs.c +++ b/arch/x86/events/amd/brs.c @@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void) * On ctxswin, sched_in = true, called after the PMU has started * On ctxswout, sched_in = false, called before the PMU is stopped */ -void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c index 5149830c7c4f..fc57d998726b 100644 --- a/arch/x86/events/amd/lbr.c +++ b/arch/x86/events/amd/lbr.c @@ -375,7 +375,8 @@ void amd_pmu_lbr_del(struct perf_event *event) perf_sched_cb_dec(event->pmu); } -void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 84f8c0bc15a0..196b39345d97 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2639,9 +2639,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { NULL, }; -static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { - static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in); + static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in); } static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 498143b312fa..ecfd63bab24d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5499,10 +5499,10 @@ static void intel_pmu_cpu_dead(int cpu) } static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, - bool sched_in) + struct task_struct *task, bool sched_in) { intel_pmu_pebs_sched_task(pmu_ctx, sched_in); - intel_pmu_lbr_sched_task(pmu_ctx, sched_in); + intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in); } static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 5f677447b812..91fa2154323e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -538,7 +538,8 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, task_context_opt(next_ctx_data)->lbr_callstack_users); } -void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); void *task_ctx; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index e5237c126e4b..3ab108490a95 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -895,7 +895,7 @@ struct x86_pmu { void (*check_microcode)(void); void (*sched_task)(struct perf_event_pmu_context *pmu_ctx, - bool sched_in); + struct task_struct *task, bool sched_in); /* * Intel Arch Perfmon v2+ @@ -1430,7 +1430,8 @@ void amd_pmu_lbr_reset(void); void amd_pmu_lbr_read(void); void amd_pmu_lbr_add(struct perf_event *event); 
void amd_pmu_lbr_del(struct perf_event *event); -void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in); void amd_pmu_lbr_enable_all(void); void amd_pmu_lbr_disable_all(void); int amd_pmu_lbr_hw_config(struct perf_event *event); @@ -1471,7 +1472,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event) perf_sched_cb_dec(event->pmu); } -void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in); #else static inline int amd_brs_init(void) { @@ -1496,7 +1498,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event) { } -static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { } @@ -1681,7 +1684,8 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); -void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in); u64 lbr_from_signext_quirk_wr(u64 val); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1009ea4da946..4ff3ef045721 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -473,7 +473,7 @@ struct pmu { * context-switches callback */ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx, - bool sched_in); + struct task_struct *task, bool sched_in); /* * Kmem cache of PMU specific data diff --git a/kernel/events/core.c b/kernel/events/core.c index e7076609fd93..7bb2c6cbb3ce 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3519,7 +3519,8 @@ static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx, } } -static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in) +static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, + struct task_struct *task, bool sched_in) { struct perf_event_pmu_context *pmu_ctx; struct perf_cpu_pmu_context *cpc; @@ -3528,7 +3529,7 @@ static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task) - pmu_ctx->pmu->sched_task(pmu_ctx, sched_in); + pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in); } } @@ -3586,7 +3587,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task); - perf_ctx_sched_task_cb(ctx, false); + perf_ctx_sched_task_cb(ctx, task, false); perf_event_swap_task_ctx_data(ctx, next_ctx); perf_ctx_enable(ctx, false); @@ -3616,7 +3617,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) perf_ctx_disable(ctx, false); inside_switch: - perf_ctx_sched_task_cb(ctx, false); + perf_ctx_sched_task_cb(ctx, task, false); task_ctx_sched_out(ctx, EVENT_ALL); perf_ctx_enable(ctx, false); @@ -3658,7 +3659,8 @@ void perf_sched_cb_inc(struct pmu *pmu) * PEBS requires this to provide PID/TID information. This requires we flush * all queued PEBS records before we context switch to a new task. 
*/ -static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in) +static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, + struct task_struct *task, bool sched_in) { struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context); struct pmu *pmu; @@ -3672,7 +3674,7 @@ static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_i perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(pmu); - pmu->sched_task(cpc->task_epc, sched_in); + pmu->sched_task(cpc->task_epc, task, sched_in); perf_pmu_enable(pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); @@ -3690,7 +3692,7 @@ static void perf_pmu_sched_task(struct task_struct *prev, return; list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry) - __perf_pmu_sched_task(cpc, sched_in); + __perf_pmu_sched_task(cpc, sched_in ? next : prev, sched_in); } static void perf_event_switch(struct task_struct *task, @@ -3994,7 +3996,7 @@ static void perf_event_context_sched_in(struct task_struct *task) perf_ctx_lock(cpuctx, ctx); perf_ctx_disable(ctx, false); - perf_ctx_sched_task_cb(ctx, true); + perf_ctx_sched_task_cb(ctx, task, true); perf_ctx_enable(ctx, false); perf_ctx_unlock(cpuctx, ctx); @@ -4025,7 +4027,7 @@ static void perf_event_context_sched_in(struct task_struct *task) perf_event_sched_in(cpuctx, ctx); - perf_ctx_sched_task_cb(cpuctx->task_ctx, true); + perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true); if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) perf_ctx_enable(&cpuctx->ctx, false); -- Gitee From 1a95b96551cb4be63b4abcd05c65caf95100fd0d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 18 Feb 2025 14:39:58 -0600 Subject: [PATCH 67/75] perf: arm_pmuv3: Don't disable counter in armv8pmu_enable_event() ANBZ: #25252 commit 4b0567ad0be52b9e7a36de94193b89e94487363f upstream. Currently armv8pmu_enable_event() starts by disabling the event counter it has been asked to enable. This should not be necessary as the counter (and the PMU as a whole) should not be active when armv8pmu_enable_event() is called. Remove the redundant call to armv8pmu_disable_event_counter(). At the same time, remove the comment immediately above as everything it says is obvious from the function names below. Signed-off-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Reviewed-by: Anshuman Khandual Tested-by: James Clark Link: https://lore.kernel.org/r/20250218-arm-brbe-v19-v20-3-4e9922fc2e8e@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/arm_pmuv3.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 16761158ab3f..2acbd02816a4 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -786,11 +786,6 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) static void armv8pmu_enable_event(struct perf_event *event) { - /* - * Enable counter and interrupt, and set the counter to count - * the event that we're interested in. - */ - armv8pmu_disable_event_counter(event); armv8pmu_write_event_type(event); armv8pmu_enable_event_irq(event); armv8pmu_enable_event_counter(event); -- Gitee From 934b006950ebb7b17e771d46367c52ce7ea208d6 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Tue, 18 Feb 2025 14:40:01 -0600 Subject: [PATCH 68/75] perf: apple_m1: Don't disable counter in m1_pmu_enable_event() ANBZ: #25252 commit c2e793da59fc297b4844669e57208c66f45fce0e upstream. Currently m1_pmu_enable_event() starts by disabling the event counter it has been asked to enable.
This should not be necessary as the counter (and the PMU as a whole) should not be active when m1_pmu_enable_event() is called. Cc: Marc Zyngier Signed-off-by: Rob Herring (Arm) Reviewed-by: Anshuman Khandual Tested-by: James Clark Link: https://lore.kernel.org/r/20250218-arm-brbe-v19-v20-6-4e9922fc2e8e@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- drivers/perf/apple_m1_cpu_pmu.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index c8f607912567..f9938f715b59 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -362,10 +362,6 @@ static void m1_pmu_enable_event(struct perf_event *event) user = event->hw.config_base & M1_PMU_CFG_COUNT_USER; kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL; - m1_pmu_disable_counter_interrupt(event->hw.idx); - m1_pmu_disable_counter(event->hw.idx); - isb(); - m1_pmu_configure_counter(event->hw.idx, evt, user, kernel); m1_pmu_enable_counter(event->hw.idx); m1_pmu_enable_counter_interrupt(event->hw.idx); -- Gitee From ad366dad1647f802c0e9e7a7044686ad1f3ffc6e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 18 Feb 2025 14:40:02 -0600 Subject: [PATCH 69/75] perf: arm_pmu: Move PMUv3-specific data ANBZ: #25252 commit dc4d58a752ea6cb0821d889e8412c22d5289f3d3 upstream. A few fields in struct arm_pmu are only used with PMUv3, and soon we will need to add more for BRBE. Group the fields together so that we have a logical place to add more data in future. At the same time, remove the comment for reg_pmmir as it doesn't convey anything useful. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Signed-off-by: Rob Herring (Arm) Reviewed-by: Anshuman Khandual Tested-by: James Clark Link: https://lore.kernel.org/r/20250218-arm-brbe-v19-v20-7-4e9922fc2e8e@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- include/linux/perf/arm_pmu.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 237654404a17..62093f5c342e 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -90,7 +90,6 @@ struct arm_pmu { struct pmu pmu; cpumask_t supported_cpus; char *name; - int pmuver; irqreturn_t (*handle_irq)(struct arm_pmu *pmu); void (*enable)(struct perf_event *event); void (*disable)(struct perf_event *event); @@ -108,18 +107,20 @@ struct arm_pmu { int (*map_event)(struct perf_event *event); DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS); bool secure_access; /* 32-bit ARM only */ -#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 - DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); -#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 - DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; struct hlist_node node; struct notifier_block cpu_pm_nb; /* the attr_groups array must be NULL-terminated */ const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; - /* store the PMMIR_EL1 to expose slots */ + + /* PMUv3 only */ + int pmuver; u64 reg_pmmir; +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 + DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); +#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 + DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* Only to be used by ACPI probing code */ unsigned long acpi_cpuid; -- Gitee From 
221c7d7d7f0e1b5699ea5c95cc81688d421c006d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 11 Jun 2025 13:01:11 -0500 Subject: [PATCH 70/75] arm64/sysreg: Add BRBE registers and fields ANBZ: #25252 commit 52e4a56ab8b85a11ffc017eaec5b2f38642a43ba upstream. This patch adds definitions related to the Branch Record Buffer Extension (BRBE) as per ARM DDI 0487K.a. These will be used by KVM and a BRBE driver in subsequent patches. Some existing BRBE definitions in asm/sysreg.h are replaced with equivalent generated definitions. Cc: Marc Zyngier Reviewed-by: Mark Brown Signed-off-by: Anshuman Khandual Signed-off-by: Mark Rutland Tested-by: James Clark Acked-by: Will Deacon Signed-off-by: Rob Herring (Arm) tested-by: Adam Young Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20250611-arm-brbe-v19-v23-1-e7775563036e@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/include/asm/sysreg.h | 14 ++-- arch/arm64/tools/sysreg | 132 ++++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a546d675c94a..752e45c29afa 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -195,16 +195,8 @@ #define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0) #define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0)) -#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0) #define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1)) -#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1) #define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2)) -#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2) -#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2) - -#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0) -#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1) -#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0) #define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3) #define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3))) @@ -746,6 +738,12 @@ #define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5) #define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7) +/* + * BRBE Instructions + */ +#define BRB_IALL_INSN __emit_inst(0xd5000000 | OP_BRB_IALL | (0x1f)) +#define BRB_INJ_INSN __emit_inst(0xd5000000 | OP_BRB_INJ | (0x1f)) + /* Common SCTLR_ELx flags. 
*/ #define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 106c84e72a2b..72cf4d46139a 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1002,6 +1002,138 @@ UnsignedEnum 3:0 BT EndEnum EndSysreg + +SysregFields BRBINFx_EL1 +Res0 63:47 +Field 46 CCU +Field 45:40 CC_EXP +Field 39:32 CC_MANT +Res0 31:18 +Field 17 LASTFAILED +Field 16 T +Res0 15:14 +Enum 13:8 TYPE + 0b000000 DIRECT_UNCOND + 0b000001 INDIRECT + 0b000010 DIRECT_LINK + 0b000011 INDIRECT_LINK + 0b000101 RET + 0b000111 ERET + 0b001000 DIRECT_COND + 0b100001 DEBUG_HALT + 0b100010 CALL + 0b100011 TRAP + 0b100100 SERROR + 0b100110 INSN_DEBUG + 0b100111 DATA_DEBUG + 0b101010 ALIGN_FAULT + 0b101011 INSN_FAULT + 0b101100 DATA_FAULT + 0b101110 IRQ + 0b101111 FIQ + 0b110000 IMPDEF_TRAP_EL3 + 0b111001 DEBUG_EXIT +EndEnum +Enum 7:6 EL + 0b00 EL0 + 0b01 EL1 + 0b10 EL2 + 0b11 EL3 +EndEnum +Field 5 MPRED +Res0 4:2 +Enum 1:0 VALID + 0b00 NONE + 0b01 TARGET + 0b10 SOURCE + 0b11 FULL +EndEnum +EndSysregFields + +SysregFields BRBCR_ELx +Res0 63:24 +Field 23 EXCEPTION +Field 22 ERTN +Res0 21:10 +Field 9 FZPSS +Field 8 FZP +Res0 7 +Enum 6:5 TS + 0b01 VIRTUAL + 0b10 GUEST_PHYSICAL + 0b11 PHYSICAL +EndEnum +Field 4 MPRED +Field 3 CC +Res0 2 +Field 1 ExBRE +Field 0 E0BRE +EndSysregFields + +Sysreg BRBCR_EL1 2 1 9 0 0 +Fields BRBCR_ELx +EndSysreg + +Sysreg BRBFCR_EL1 2 1 9 0 1 +Res0 63:30 +Enum 29:28 BANK + 0b00 BANK_0 + 0b01 BANK_1 +EndEnum +Res0 27:23 +Field 22 CONDDIR +Field 21 DIRCALL +Field 20 INDCALL +Field 19 RTN +Field 18 INDIRECT +Field 17 DIRECT +Field 16 EnI +Res0 15:8 +Field 7 PAUSED +Field 6 LASTFAILED +Res0 5:0 +EndSysreg + +Sysreg BRBTS_EL1 2 1 9 0 2 +Field 63:0 TS +EndSysreg + +Sysreg BRBINFINJ_EL1 2 1 9 1 0 +Fields BRBINFx_EL1 +EndSysreg + +Sysreg BRBSRCINJ_EL1 2 1 9 1 1 +Field 63:0 ADDRESS +EndSysreg + +Sysreg BRBTGTINJ_EL1 2 1 9 1 2 +Field 63:0 ADDRESS +EndSysreg + +Sysreg BRBIDR0_EL1 2 1 9 2 0 +Res0 63:16 +Enum 15:12 CC + 0b0101 20_BIT +EndEnum +Enum 11:8 FORMAT + 0b0000 FORMAT_0 +EndEnum +Enum 7:0 NUMREC + 0b00001000 8 + 0b00010000 16 + 0b00100000 32 + 0b01000000 64 +EndEnum +EndSysreg + +Sysreg BRBCR_EL2 2 4 9 0 0 +Fields BRBCR_ELx +EndSysreg + +Sysreg BRBCR_EL12 2 5 9 0 0 +Fields BRBCR_ELx +EndSysreg + Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Res0 63:60 UnsignedEnum 59:56 F64MM -- Gitee From 81a661cbb9a93f44baf9cc235c491641dee4977f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 10 Oct 2024 16:13:26 +0100 Subject: [PATCH 71/75] KVM: arm64: Shave a few bytes from the EL2 idmap code ANBZ: #25252 commit afa9b48f327c9ef36bfba4c643a29385a633252b upstream. Our idmap is becoming too big, to the point where it doesn't fit in a 4kB page anymore. There are some low-hanging fruits though, such as the el2_init_state horror that is expanded 3 times in the kernel. Let's at least limit ourselves to two copies, which makes the kernel link again. At some point, we'll have to have a better way of doing this. 
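[ Annotated excerpt of the resulting call sequence in ___kvm_hyp_init (taken from the hyp-init.S hunk below): this code runs without a stack, so the link register is stashed in the per-CPU init params around the new bl, and x0 is reloaded from TPIDR_EL2 because the init macros clobber x0..x2:

	str	lr, [x0, #NVHE_INIT_TMP]	// bl below clobbers lr
	bl	__kvm_init_el2_state		// shared init_el2_state/finalise_el2_state
	mrs	x0, tpidr_el2			// recover x0 (clobbered by the macros)
	ldr	lr, [x0, #NVHE_INIT_TMP]	// restore the original return address
]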
Reported-by: Nathan Chancellor Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20241009204903.GA3353168@thelio-3990X --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kvm/hyp/nvhe/hyp-init.S | 52 +++++++++++++++++------------- 3 files changed, 31 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 508f3b61a695..3641f4e2d3a7 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -193,6 +193,7 @@ struct kvm_nvhe_init_params { unsigned long hcr_el2; unsigned long vttbr; unsigned long vtcr; + unsigned long tmp; }; /* diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 71ab5836d253..ae7be438060c 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -147,6 +147,7 @@ int main(void) DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2)); DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr)); DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr)); + DEFINE(NVHE_INIT_TMP, offsetof(struct kvm_nvhe_init_params, tmp)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 1cc06e6797bd..0f9d8422cf80 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -23,28 +23,25 @@ .align 11 SYM_CODE_START(__kvm_hyp_init) - ventry __invalid // Synchronous EL2t - ventry __invalid // IRQ EL2t - ventry __invalid // FIQ EL2t - ventry __invalid // Error EL2t + ventry . // Synchronous EL2t + ventry . // IRQ EL2t + ventry . // FIQ EL2t + ventry . // Error EL2t - ventry __invalid // Synchronous EL2h - ventry __invalid // IRQ EL2h - ventry __invalid // FIQ EL2h - ventry __invalid // Error EL2h + ventry . // Synchronous EL2h + ventry . // IRQ EL2h + ventry . // FIQ EL2h + ventry . // Error EL2h ventry __do_hyp_init // Synchronous 64-bit EL1 - ventry __invalid // IRQ 64-bit EL1 - ventry __invalid // FIQ 64-bit EL1 - ventry __invalid // Error 64-bit EL1 + ventry . // IRQ 64-bit EL1 + ventry . // FIQ 64-bit EL1 + ventry . // Error 64-bit EL1 - ventry __invalid // Synchronous 32-bit EL1 - ventry __invalid // IRQ 32-bit EL1 - ventry __invalid // FIQ 32-bit EL1 - ventry __invalid // Error 32-bit EL1 - -__invalid: - b . + ventry . // Synchronous 32-bit EL1 + ventry . // IRQ 32-bit EL1 + ventry . // FIQ 32-bit EL1 + ventry . // Error 32-bit EL1 /* * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers. @@ -75,6 +72,13 @@ __do_hyp_init: eret SYM_CODE_END(__kvm_hyp_init) +SYM_CODE_START_LOCAL(__kvm_init_el2_state) + /* Initialize EL2 CPU state to sane values. */ + init_el2_state // Clobbers x0..x2 + finalise_el2_state + ret +SYM_CODE_END(__kvm_init_el2_state) + /* * Initialize the hypervisor in EL2. * @@ -101,9 +105,12 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) // TPIDR_EL2 is used to preserve x0 across the macro maze... isb msr tpidr_el2, x0 - init_el2_state - finalise_el2_state + str lr, [x0, #NVHE_INIT_TMP] + + bl __kvm_init_el2_state + mrs x0, tpidr_el2 + ldr lr, [x0, #NVHE_INIT_TMP] 1: ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] @@ -202,9 +209,8 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) 2: msr SPsel, #1 // We want to use SP_EL{1,2} - /* Initialize EL2 CPU state to sane values. 
*/ - init_el2_state // Clobbers x0..x2 - finalise_el2_state + bl __kvm_init_el2_state + __init_el2_nvhe_prepare_eret /* Enable MMU, set vectors and stack. */ -- Gitee From 3c74e5d3cee7dd76638157b4dc21bb6bd3d89a03 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 20 May 2025 16:06:32 -0500 Subject: [PATCH 72/75] arm64: Handle BRBE booting requirements ANBZ: #25252 commit ae344bcb0d4967f6aa5f7b02f86bcfd389e513e4 upstream. To use the Branch Record Buffer Extension (BRBE), some configuration is necessary at EL3 and EL2. This patch documents the requirements and adds the initial EL2 setup code, which largely consists of configuring the fine-grained traps and initializing a couple of BRBE control registers. Before this patch, __init_el2_fgt() would initialize HDFGRTR_EL2 and HDFGWTR_EL2 with the same value, relying on the read/write trap controls for a register occupying the same bit position in either register. The 'nBRBIDR' trap control only exists in bit 59 of HDFGRTR_EL2, while bit 59 of HDFGWTR_EL2 is RES0, and so this assumption no longer holds. To handle HDFGRTR_EL2 and HDFGWTR_EL2 having (slightly) different bit layouts, __init_el2_fgt() is changed to accumulate the HDFGRTR_EL2 and HDFGWTR_EL2 control bits separately. While making this change the open-coded value (1 << 62) is replaced with HDFG{R,W}TR_EL2_nPMSNEVFR_EL1_MASK. The BRBCR_EL1 and BRBCR_EL2 registers are unusual and require special initialisation: even though they are subject to E2H renaming, both have an effect regardless of HCR_EL2.TGE, even when running at EL2. So we must initialize BRBCR_EL2 in case we run in nVHE mode. This is handled in __init_el2_brbe() with a comment to explain the situation. Cc: Marc Zyngier Cc: Oliver Upton Reviewed-by: Leo Yan Tested-by: James Clark Signed-off-by: Anshuman Khandual [Mark: rewrite commit message, fix typo in comment] Signed-off-by: Mark Rutland Co-developed-by: "Rob Herring (Arm)" Signed-off-by: "Rob Herring (Arm)" Signed-off-by: Wei Chen --- Documentation/arch/arm64/booting.rst | 21 ++++++++ arch/arm64/include/asm/el2_setup.h | 75 ++++++++++++++++++++++++++-- 2 files changed, 92 insertions(+), 4 deletions(-) diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index b57776a68f15..d049a8714e02 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -379,6 +379,27 @@ Before jumping into the kernel, the following conditions must be met: - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1. + For CPUs with the Branch Record Buffer Extension (FEAT_BRBE): + + - If EL3 is present: + + - MDCR_EL3.SBRBE (bits 33:32) must be initialised to 0b01 or 0b11. + + - If the kernel is entered at EL1 and EL2 is present: + + - BRBCR_EL2.CC (bit 3) must be initialised to 0b1. + - BRBCR_EL2.MPRED (bit 4) must be initialised to 0b1. + + - HDFGRTR_EL2.nBRBDATA (bit 61) must be initialised to 0b1. + - HDFGRTR_EL2.nBRBCTL (bit 60) must be initialised to 0b1. + - HDFGRTR_EL2.nBRBIDR (bit 59) must be initialised to 0b1. + + - HDFGWTR_EL2.nBRBDATA (bit 61) must be initialised to 0b1. + - HDFGWTR_EL2.nBRBCTL (bit 60) must be initialised to 0b1. + + - HFGITR_EL2.nBRBIALL (bit 56) must be initialised to 0b1. + - HFGITR_EL2.nBRBINJ (bit 55) must be initialised to 0b1. 
+ For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS): - If the kernel is entered at EL1 and EL2 is present: diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 1e2181820a0a..d93c2b5b0b59 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -154,6 +154,28 @@ .Lskip_set_cptr_\@: .endm +/* + * Configure BRBE to permit recording cycle counts and branch mispredicts. + * + * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and + * BRBCR_EL1.CC=1. + * + * At any EL, to record branch mispredicts BRBE requires that both + * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1. + * + * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are + * executing in EL1. + */ +.macro __init_el2_brbe + mrs x1, id_aa64dfr0_el1 + ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4 + cbz x1, .Lskip_brbe_\@ + + mov_q x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED + msr_s SYS_BRBCR_EL2, x0 +.Lskip_brbe_\@: +.endm + /* Disable any fine grained traps */ .macro __init_el2_fgt mrs x1, id_aa64mmfr0_el1 @@ -161,18 +183,62 @@ cbz x1, .Lskip_fgt_\@ mov x0, xzr + mov x2, xzr mrs x1, id_aa64dfr0_el1 ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cmp x1, #3 - b.lt .Lset_debug_fgt_\@ + b.lt .Lskip_spe_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ - orr x0, x0, #(1 << 62) + orr x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK + orr x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK + +.Lskip_spe_fgt_\@: + mrs x1, id_aa64dfr0_el1 + ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4 + cbz x1, .Lskip_brbe_fgt_\@ + + /* + * Disable read traps for the following registers + * + * [BRBSRC|BRBTGT|RBINF]_EL1 + * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1 + */ + orr x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK + + /* + * Disable write traps for the following registers + * + * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1 + */ + orr x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK + + /* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */ + orr x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK + orr x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK + + /* Disable read traps for BRBIDR_EL1 */ + orr x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK + +.Lskip_brbe_fgt_\@: .Lset_debug_fgt_\@: msr_s SYS_HDFGRTR_EL2, x0 - msr_s SYS_HDFGWTR_EL2, x0 + msr_s SYS_HDFGWTR_EL2, x2 mov x0, xzr + mov x2, xzr + + mrs x1, id_aa64dfr0_el1 + ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4 + cbz x1, .Lskip_brbe_insn_fgt_\@ + + /* Disable traps for BRBIALL instruction */ + orr x2, x2, #HFGITR_EL2_nBRBIALL_MASK + + /* Disable traps for BRBINJ instruction */ + orr x2, x2, #HFGITR_EL2_nBRBINJ_MASK + +.Lskip_brbe_insn_fgt_\@: mrs x1, id_aa64pfr1_el1 ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4 cbz x1, .Lset_pie_fgt_\@ @@ -193,7 +259,7 @@ .Lset_fgt_\@: msr_s SYS_HFGRTR_EL2, x0 msr_s SYS_HFGWTR_EL2, x0 - msr_s SYS_HFGITR_EL2, xzr + msr_s SYS_HFGITR_EL2, x2 mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 @@ -236,6 +302,7 @@ __init_el2_hcrx __init_el2_timers __init_el2_debug + __init_el2_brbe __init_el2_lor __init_el2_stage2 __init_el2_gicv3 -- Gitee From 40ff28afbf1b029a35da5a3db7d5d059e5a0ab6b Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 11 Jun 2025 13:01:13 -0500 Subject: [PATCH 73/75] KVM: arm64: nvhe: Disable branch generation in nVHE guests ANBZ: #25252 commit d7567e9b9ba53861390830ee18b9fb2035ca81c4 upstream. While BRBE can record branches within guests, the host recording branches in guests is not supported by perf (though events are). 
Support for BRBE in guests will be supported by providing direct access to BRBE within the guests. That is how x86 LBR works for guests. Therefore, BRBE needs to be disabled on guest entry and restored on exit. For nVHE, this requires explicit handling for guests. Before entering a guest, save the BRBE state and disable it. When returning to the host, restore the state. For VHE, it is not necessary. We initialize BRBCR_EL1.{E1BRE,E0BRE}=={0,0} at boot time, and HCR_EL2.TGE==1 while running in the host. We configure BRBCR_EL2.{E2BRE,E0HBRE} to enable branch recording in the host. When entering the guest, we set HCR_EL2.TGE==0 which means BRBCR_EL1 is used instead of BRBCR_EL2. Consequently for VHE, BRBE recording is disabled at EL1 and EL0 when running a guest. Should recording in guests (by the host) ever be desired, the perf ABI will need to be extended to distinguish guest addresses (struct perf_branch_entry.priv) for starters. BRBE records would also need to be invalidated on guest entry/exit as guest/host EL1 and EL0 records can't be distinguished. Signed-off-by: Anshuman Khandual Signed-off-by: Mark Rutland Co-developed-by: Rob Herring (Arm) Signed-off-by: Rob Herring (Arm) Tested-by: James Clark Reviewed-by: Leo Yan Reviewed-by: Suzuki K Poulose Acked-by: Marc Zyngier Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20250611-arm-brbe-v19-v23-3-e7775563036e@kernel.org Signed-off-by: Will Deacon Signed-off-by: Wei Chen --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/debug.c | 4 ++++ arch/arm64/kvm/hyp/nvhe/debug-sr.c | 32 ++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cc03e86cafbe..5d9a47e0e261 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -454,6 +454,7 @@ struct kvm_host_data { #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 #define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6 #define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7 +#define KVM_HOST_DATA_FLAG_HAS_BRBE 8 unsigned long flags; struct kvm_cpu_context host_ctxt; @@ -479,6 +480,7 @@ struct kvm_host_data { u64 trfcr_el1; /* Values of trap registers for the host before guest entry.
*/ u64 mdcr_el2; + u64 brbcr_el1; } host_debug_state; /* Guest trace filter value */ diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 28eb9f17ea7a..d99ab9483692 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -308,6 +308,10 @@ void kvm_init_host_debug_data(void) !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) host_data_set_flag(HAS_SPE); + /* Check if we have BRBE implemented and available at the host */ + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT)) + host_data_set_flag(HAS_BRBE); + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) { /* Force disable trace in protected mode in case of no TRBE */ if (is_protected_kvm_enabled()) diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index dd4184d81611..54d43c215645 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -92,12 +92,42 @@ static void __trace_switch_to_host(void) *host_data_ptr(host_debug_state.trfcr_el1)); } +static void __debug_save_brbe(u64 *brbcr_el1) +{ + *brbcr_el1 = 0; + + /* Check if the BRBE is enabled */ + if (!(read_sysreg_el1(SYS_BRBCR) & (BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE))) + return; + + /* + * Prohibit branch record generation while we are in guest. + * Since access to BRBCR_EL1 is trapped, the guest can't + * modify the filtering set by the host. + */ + *brbcr_el1 = read_sysreg_el1(SYS_BRBCR); + write_sysreg_el1(0, SYS_BRBCR); +} + +static void __debug_restore_brbe(u64 brbcr_el1) +{ + if (!brbcr_el1) + return; + + /* Restore BRBE controls */ + write_sysreg_el1(brbcr_el1, SYS_BRBCR); +} + void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) { /* Disable and flush SPE data generation */ if (host_data_test_flag(HAS_SPE)) __debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1)); + /* Disable BRBE branch records */ + if (host_data_test_flag(HAS_BRBE)) + __debug_save_brbe(host_data_ptr(host_debug_state.brbcr_el1)); + if (__trace_needs_switch()) __trace_switch_to_guest(); } @@ -111,6 +141,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) { if (host_data_test_flag(HAS_SPE)) __debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1)); + if (host_data_test_flag(HAS_BRBE)) + __debug_restore_brbe(*host_data_ptr(host_debug_state.brbcr_el1)); if (__trace_needs_switch()) __trace_switch_to_host(); } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 668a0c339289..024a7871892f 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -332,7 +332,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) * We're about to restore some new MMU state. Make sure * ongoing page-table walks that have started before we * trapped to EL2 have completed. This also synchronises the - * above disabling of SPE and TRBE. * * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", * rule R_LFHQG and subsequent information statements. + * above disabling of BRBE, SPE and TRBE. * * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", * rule R_LFHQG and subsequent information statements. -- Gitee From d7855177c2137dfcd1b2ce361c909cd990eb5bc8 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 11 Jun 2025 13:01:14 -0500 Subject: [PATCH 74/75] perf: arm_pmuv3: Add support for the Branch Record Buffer Extension (BRBE) ANBZ: #25252 commit 58074a0fce66c6c97b35ce8a28ed4e7b780f9a8f upstream. The ARMv9.2 architecture introduces the optional Branch Record Buffer Extension (BRBE), which records information about branches as they are executed into a set of branch record registers.
BRBE is similar to x86's Last Branch Record (LBR) and PowerPC's Branch History Rolling Buffer (BHRB). BRBE supports filtering by exception level and can filter just the source or target address if excluded to avoid leaking privileged addresses. The h/w filter would be sufficient except when there are multiple events with disjoint filtering requirements. In this case, BRBE is configured with a union of all the events' desired branches, and then the recorded branches are filtered based on each event's filter. For example, with one event capturing kernel events and another event capturing user events, BRBE will be configured to capture both kernel and user branches. When handling event overflow, the branch records have to be filtered by software to only include kernel or user branch addresses for that event. In contrast, x86 simply configures LBR using the last installed event which seems broken. It is possible on x86 to configure branch filter such that no branches are ever recorded (e.g. -j save_type). For BRBE, events with a configuration that will result in no samples are rejected. Recording branches in KVM guests is not supported like x86. However, perf on x86 allows requesting branch recording in guests. The guest events are recorded, but the resulting branches are all from the host. For BRBE, events with branch recording and "exclude_host" set are rejected. Requiring "exclude_guest" to be set did not work. The default for the perf tool does set "exclude_guest" if no exception level options are specified. However, specifying kernel or user events defaults to including both host and guest. In this case, only host branches are recorded. BRBE can support some additional exception branch types compared to x86. On x86, all exceptions other than syscalls are recorded as IRQ. With BRBE, it is possible to better categorize these exceptions. One limitation relative to x86 is we cannot distinguish a syscall return from other exception returns. So all exception returns are recorded as ERET type. The FIQ branch type is omitted as the only FIQ user is Apple platforms which don't support BRBE. The debug branch types are omitted as there is no clear need for them. BRBE records are invalidated whenever events are reconfigured, a new task is scheduled in, or after recording is paused (and the records have been recorded for the event). The architecture allows branch records to be invalidated by the PE under implementation defined conditions. It is expected that these conditions are rare. Cc: Catalin Marinas Co-developed-by: Anshuman Khandual Signed-off-by: Anshuman Khandual Co-developed-by: Mark Rutland Signed-off-by: Mark Rutland Tested-by: James Clark Signed-off-by: Rob Herring (Arm) tested-by: Adam Young Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20250611-arm-brbe-v19-v23-4-e7775563036e@kernel.org [will: Fix sparse warnings about mixed declarations and code. Fix C99 comment syntax.] 
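[ For illustration, a sketch of the union-then-filter scheme described above; the helper name is hypothetical, and the driver's real logic is spread across the brbe_* functions in the diff below:

	/*
	 * The hardware is programmed with the union of all events'
	 * branch filters; each event then drops, in software, any
	 * captured record it did not ask for.
	 */
	static bool brbe_record_wanted_sketch(struct perf_event *event,
					      bool from_user)
	{
		u64 br = event->attr.branch_sample_type;

		return from_user ? (br & PERF_SAMPLE_BRANCH_USER)
				 : (br & PERF_SAMPLE_BRANCH_KERNEL);
	}
]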
Signed-off-by: Will Deacon
Signed-off-by: Wei Chen
---
 drivers/perf/Kconfig         |  11 +
 drivers/perf/Makefile        |   1 +
 drivers/perf/arm_brbe.c      | 805 +++++++++++++++++++++++++++++++++++
 drivers/perf/arm_brbe.h      |  47 ++
 drivers/perf/arm_pmu.c       |  16 +-
 drivers/perf/arm_pmuv3.c     | 107 ++++-
 include/linux/perf/arm_pmu.h |   8 +
 7 files changed, 990 insertions(+), 5 deletions(-)
 create mode 100644 drivers/perf/arm_brbe.c
 create mode 100644 drivers/perf/arm_brbe.h

diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 8ded2ca9fbc6..e485005e8c4d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -192,6 +192,17 @@ config ARM_SPE_PMU
 	  Extension, which provides periodic sampling of operations in the
 	  CPU pipeline and reports this via the perf AUX interface.
 
+config ARM64_BRBE
+	bool "Enable support for branch stack sampling using FEAT_BRBE"
+	depends on ARM_PMUV3 && ARM64
+	default y
+	help
+	  Enable perf support for the Branch Record Buffer Extension (BRBE),
+	  which records all branches taken in an execution path. It supports
+	  filtering by some branch types and by privilege level, and captures
+	  additional information such as the cycle count, misprediction
+	  status, branch type and branch privilege level.
+
 config ARM_DMC620_PMU
 	tristate "Enable PMU support for the ARM DMC-620 memory controller"
 	depends on (ARM64 && ACPI) || COMPILE_TEST
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 16611eb02a8a..723932e83869 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o
 obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
 obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_ARM64_BRBE) += arm_brbe.o
 obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
 obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
 obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
diff --git a/drivers/perf/arm_brbe.c b/drivers/perf/arm_brbe.c
new file mode 100644
index 000000000000..ba554e0c846c
--- /dev/null
+++ b/drivers/perf/arm_brbe.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Branch Record Buffer Extension Driver.
+ *
+ * Copyright (C) 2022-2025 ARM Limited
+ *
+ * Author: Anshuman Khandual
+ */
+#include
+#include
+#include
+#include "arm_brbe.h"
+
+#define BRBFCR_EL1_BRANCH_FILTERS (BRBFCR_EL1_DIRECT   | \
+				   BRBFCR_EL1_INDIRECT | \
+				   BRBFCR_EL1_RTN      | \
+				   BRBFCR_EL1_INDCALL  | \
+				   BRBFCR_EL1_DIRCALL  | \
+				   BRBFCR_EL1_CONDDIR)
+
+/*
+ * BRBTS_EL1 is currently not used for branch stack implementation
+ * purposes, but BRBCR_ELx.TS needs a valid value from the available
+ * options; BRBCR_ELx_TS_VIRTUAL is selected for this.
+ */
+#define BRBCR_ELx_DEFAULT_TS	FIELD_PREP(BRBCR_ELx_TS_MASK, BRBCR_ELx_TS_VIRTUAL)
+
+/*
+ * BRBE Buffer Organization
+ *
+ * The BRBE buffer is arranged as multiple banks of 32 branch record
+ * entries each. An individual branch record in a given bank can be
+ * accessed by selecting the bank in BRBFCR_EL1.BANK and then reading
+ * the register set [BRBSRC, BRBTGT, BRBINF] with an index in [0..31].
+ *
+ * Bank 0
+ *
+ *	---------------------------------	------
+ *	| 00 | BRBSRC | BRBTGT | BRBINF |	| 00 |
+ *	---------------------------------	------
+ *	| 01 | BRBSRC | BRBTGT | BRBINF |	| 01 |
+ *	---------------------------------	------
+ *	| .. | BRBSRC | BRBTGT | BRBINF |	| .. 
| + * --------------------------------- ------ + * | 31 | BRBSRC | BRBTGT | BRBINF | | 31 | + * --------------------------------- ------ + * + * Bank 1 + * + * --------------------------------- ------ + * | 32 | BRBSRC | BRBTGT | BRBINF | | 00 | + * --------------------------------- ------ + * | 33 | BRBSRC | BRBTGT | BRBINF | | 01 | + * --------------------------------- ------ + * | .. | BRBSRC | BRBTGT | BRBINF | | .. | + * --------------------------------- ------ + * | 63 | BRBSRC | BRBTGT | BRBINF | | 31 | + * --------------------------------- ------ + */ +#define BRBE_BANK_MAX_ENTRIES 32 + +struct brbe_regset { + u64 brbsrc; + u64 brbtgt; + u64 brbinf; +}; + +#define PERF_BR_ARM64_MAX (PERF_BR_MAX + PERF_BR_NEW_MAX) + +struct brbe_hw_attr { + int brbe_version; + int brbe_cc; + int brbe_nr; + int brbe_format; +}; + +#define BRBE_REGN_CASE(n, case_macro) \ + case n: case_macro(n); break + +#define BRBE_REGN_SWITCH(x, case_macro) \ + do { \ + switch (x) { \ + BRBE_REGN_CASE(0, case_macro); \ + BRBE_REGN_CASE(1, case_macro); \ + BRBE_REGN_CASE(2, case_macro); \ + BRBE_REGN_CASE(3, case_macro); \ + BRBE_REGN_CASE(4, case_macro); \ + BRBE_REGN_CASE(5, case_macro); \ + BRBE_REGN_CASE(6, case_macro); \ + BRBE_REGN_CASE(7, case_macro); \ + BRBE_REGN_CASE(8, case_macro); \ + BRBE_REGN_CASE(9, case_macro); \ + BRBE_REGN_CASE(10, case_macro); \ + BRBE_REGN_CASE(11, case_macro); \ + BRBE_REGN_CASE(12, case_macro); \ + BRBE_REGN_CASE(13, case_macro); \ + BRBE_REGN_CASE(14, case_macro); \ + BRBE_REGN_CASE(15, case_macro); \ + BRBE_REGN_CASE(16, case_macro); \ + BRBE_REGN_CASE(17, case_macro); \ + BRBE_REGN_CASE(18, case_macro); \ + BRBE_REGN_CASE(19, case_macro); \ + BRBE_REGN_CASE(20, case_macro); \ + BRBE_REGN_CASE(21, case_macro); \ + BRBE_REGN_CASE(22, case_macro); \ + BRBE_REGN_CASE(23, case_macro); \ + BRBE_REGN_CASE(24, case_macro); \ + BRBE_REGN_CASE(25, case_macro); \ + BRBE_REGN_CASE(26, case_macro); \ + BRBE_REGN_CASE(27, case_macro); \ + BRBE_REGN_CASE(28, case_macro); \ + BRBE_REGN_CASE(29, case_macro); \ + BRBE_REGN_CASE(30, case_macro); \ + BRBE_REGN_CASE(31, case_macro); \ + default: WARN(1, "Invalid BRB* index %d\n", x); \ + } \ + } while (0) + +#define RETURN_READ_BRBSRCN(n) \ + return read_sysreg_s(SYS_BRBSRC_EL1(n)) +static inline u64 get_brbsrc_reg(int idx) +{ + BRBE_REGN_SWITCH(idx, RETURN_READ_BRBSRCN); + return 0; +} + +#define RETURN_READ_BRBTGTN(n) \ + return read_sysreg_s(SYS_BRBTGT_EL1(n)) +static u64 get_brbtgt_reg(int idx) +{ + BRBE_REGN_SWITCH(idx, RETURN_READ_BRBTGTN); + return 0; +} + +#define RETURN_READ_BRBINFN(n) \ + return read_sysreg_s(SYS_BRBINF_EL1(n)) +static u64 get_brbinf_reg(int idx) +{ + BRBE_REGN_SWITCH(idx, RETURN_READ_BRBINFN); + return 0; +} + +static u64 brbe_record_valid(u64 brbinf) +{ + return FIELD_GET(BRBINFx_EL1_VALID_MASK, brbinf); +} + +static bool brbe_invalid(u64 brbinf) +{ + return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_NONE; +} + +static bool brbe_record_is_complete(u64 brbinf) +{ + return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_FULL; +} + +static bool brbe_record_is_source_only(u64 brbinf) +{ + return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_SOURCE; +} + +static bool brbe_record_is_target_only(u64 brbinf) +{ + return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_TARGET; +} + +static int brbinf_get_in_tx(u64 brbinf) +{ + return FIELD_GET(BRBINFx_EL1_T_MASK, brbinf); +} + +static int brbinf_get_mispredict(u64 brbinf) +{ + return FIELD_GET(BRBINFx_EL1_MPRED_MASK, brbinf); +} + +static int brbinf_get_lastfailed(u64 
brbinf)
+{
+	return FIELD_GET(BRBINFx_EL1_LASTFAILED_MASK, brbinf);
+}
+
+static u16 brbinf_get_cycles(u64 brbinf)
+{
+	u32 exp, mant, cycles;
+
+	/*
+	 * Captured cycle count is unknown and hence
+	 * should not be passed on to userspace.
+	 */
+	if (brbinf & BRBINFx_EL1_CCU)
+		return 0;
+
+	exp = FIELD_GET(BRBINFx_EL1_CC_EXP_MASK, brbinf);
+	mant = FIELD_GET(BRBINFx_EL1_CC_MANT_MASK, brbinf);
+
+	if (!exp)
+		return mant;
+
+	cycles = (mant | 0x100) << (exp - 1);
+
+	return min(cycles, U16_MAX);
+}
+
+static int brbinf_get_type(u64 brbinf)
+{
+	return FIELD_GET(BRBINFx_EL1_TYPE_MASK, brbinf);
+}
+
+static int brbinf_get_el(u64 brbinf)
+{
+	return FIELD_GET(BRBINFx_EL1_EL_MASK, brbinf);
+}
+
+void brbe_invalidate(void)
+{
+	/* Ensure all branches before this point are recorded */
+	isb();
+	asm volatile(BRB_IALL_INSN);
+	/* Ensure all branch records are invalidated after this point */
+	isb();
+}
+
+static bool valid_brbe_nr(int brbe_nr)
+{
+	return brbe_nr == BRBIDR0_EL1_NUMREC_8 ||
+	       brbe_nr == BRBIDR0_EL1_NUMREC_16 ||
+	       brbe_nr == BRBIDR0_EL1_NUMREC_32 ||
+	       brbe_nr == BRBIDR0_EL1_NUMREC_64;
+}
+
+static bool valid_brbe_cc(int brbe_cc)
+{
+	return brbe_cc == BRBIDR0_EL1_CC_20_BIT;
+}
+
+static bool valid_brbe_format(int brbe_format)
+{
+	return brbe_format == BRBIDR0_EL1_FORMAT_FORMAT_0;
+}
+
+static bool valid_brbidr(u64 brbidr)
+{
+	int brbe_format, brbe_cc, brbe_nr;
+
+	brbe_format = FIELD_GET(BRBIDR0_EL1_FORMAT_MASK, brbidr);
+	brbe_cc = FIELD_GET(BRBIDR0_EL1_CC_MASK, brbidr);
+	brbe_nr = FIELD_GET(BRBIDR0_EL1_NUMREC_MASK, brbidr);
+
+	return valid_brbe_format(brbe_format) && valid_brbe_cc(brbe_cc) && valid_brbe_nr(brbe_nr);
+}
+
+static bool valid_brbe_version(int brbe_version)
+{
+	return brbe_version == ID_AA64DFR0_EL1_BRBE_IMP ||
+	       brbe_version == ID_AA64DFR0_EL1_BRBE_BRBE_V1P1;
+}
+
+static void select_brbe_bank(int bank)
+{
+	u64 brbfcr;
+
+	brbfcr = read_sysreg_s(SYS_BRBFCR_EL1);
+	brbfcr &= ~BRBFCR_EL1_BANK_MASK;
+	brbfcr |= SYS_FIELD_PREP(BRBFCR_EL1, BANK, bank);
+	write_sysreg_s(brbfcr, SYS_BRBFCR_EL1);
+	/*
+	 * Arm ARM (DDI 0487K.a) D.18.4 rule PPBZP requires explicit sync
+	 * between setting BANK and accessing branch records.
+	 */
+	isb();
+}
+
+static bool __read_brbe_regset(struct brbe_regset *entry, int idx)
+{
+	entry->brbinf = get_brbinf_reg(idx);
+
+	if (brbe_invalid(entry->brbinf))
+		return false;
+
+	entry->brbsrc = get_brbsrc_reg(idx);
+	entry->brbtgt = get_brbtgt_reg(idx);
+	return true;
+}
+
+/*
+ * Generic perf branch filters supported on BRBE
+ *
+ * New branch filters need to be evaluated for whether they can be
+ * supported on BRBE, so that they are not silently accepted only to
+ * fail later. PERF_SAMPLE_BRANCH_HV is a special case that is
+ * selectively supported only on platforms where the kernel is in hyp
+ * mode.
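+ * For example, the transaction-related filters (ABORT_TX, IN_TX and
+ * NO_TX) are in the exclude mask below because neither the hardware
+ * nor the kernel currently supports TME; see the transaction-state
+ * warning in perf_entry_from_brbe_regset().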
+ */
+#define BRBE_EXCLUDE_BRANCH_FILTERS (PERF_SAMPLE_BRANCH_ABORT_TX	| \
+				     PERF_SAMPLE_BRANCH_IN_TX		| \
+				     PERF_SAMPLE_BRANCH_NO_TX		| \
+				     PERF_SAMPLE_BRANCH_CALL_STACK	| \
+				     PERF_SAMPLE_BRANCH_COUNTERS)
+
+#define BRBE_ALLOWED_BRANCH_TYPES   (PERF_SAMPLE_BRANCH_ANY		| \
+				     PERF_SAMPLE_BRANCH_ANY_CALL	| \
+				     PERF_SAMPLE_BRANCH_ANY_RETURN	| \
+				     PERF_SAMPLE_BRANCH_IND_CALL	| \
+				     PERF_SAMPLE_BRANCH_COND		| \
+				     PERF_SAMPLE_BRANCH_IND_JUMP	| \
+				     PERF_SAMPLE_BRANCH_CALL)
+
+#define BRBE_ALLOWED_BRANCH_FILTERS (PERF_SAMPLE_BRANCH_USER		| \
+				     PERF_SAMPLE_BRANCH_KERNEL		| \
+				     PERF_SAMPLE_BRANCH_HV		| \
+				     BRBE_ALLOWED_BRANCH_TYPES		| \
+				     PERF_SAMPLE_BRANCH_NO_FLAGS	| \
+				     PERF_SAMPLE_BRANCH_NO_CYCLES	| \
+				     PERF_SAMPLE_BRANCH_TYPE_SAVE	| \
+				     PERF_SAMPLE_BRANCH_HW_INDEX	| \
+				     PERF_SAMPLE_BRANCH_PRIV_SAVE)
+
+#define BRBE_PERF_BRANCH_FILTERS    (BRBE_ALLOWED_BRANCH_FILTERS	| \
+				     BRBE_EXCLUDE_BRANCH_FILTERS)
+
+/*
+ * BRBE supports the following functional branch type filters while
+ * generating branch records. These branch filters can be enabled
+ * either individually or as a group, i.e. by ORing multiple filters
+ * together.
+ *
+ * BRBFCR_EL1_CONDDIR  - Conditional direct branch
+ * BRBFCR_EL1_DIRCALL  - Direct call
+ * BRBFCR_EL1_INDCALL  - Indirect call
+ * BRBFCR_EL1_INDIRECT - Indirect branch
+ * BRBFCR_EL1_DIRECT   - Direct branch
+ * BRBFCR_EL1_RTN      - Subroutine return
+ */
+static u64 branch_type_to_brbfcr(int branch_type)
+{
+	u64 brbfcr = 0;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_ANY) {
+		brbfcr |= BRBFCR_EL1_BRANCH_FILTERS;
+		return brbfcr;
+	}
+
+	if (branch_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+		brbfcr |= BRBFCR_EL1_INDCALL;
+		brbfcr |= BRBFCR_EL1_DIRCALL;
+	}
+
+	if (branch_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+		brbfcr |= BRBFCR_EL1_RTN;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_IND_CALL)
+		brbfcr |= BRBFCR_EL1_INDCALL;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_COND)
+		brbfcr |= BRBFCR_EL1_CONDDIR;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_IND_JUMP)
+		brbfcr |= BRBFCR_EL1_INDIRECT;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_CALL)
+		brbfcr |= BRBFCR_EL1_DIRCALL;
+
+	return brbfcr;
+}
+
+/*
+ * BRBE supports the following privilege mode filters while generating
+ * branch records.
+ *
+ * BRBCR_ELx_E0BRE - EL0 branch records
+ * BRBCR_ELx_ExBRE - EL1/EL2 branch records
+ *
+ * BRBE also supports the following additional functional branch type
+ * filters while generating branch records.
+ *
+ * BRBCR_ELx_EXCEPTION - Exception
+ * BRBCR_ELx_ERTN      - Exception return
+ */
+static u64 branch_type_to_brbcr(int branch_type)
+{
+	u64 brbcr = BRBCR_ELx_FZP | BRBCR_ELx_DEFAULT_TS;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_USER)
+		brbcr |= BRBCR_ELx_E0BRE;
+
+	/*
+	 * When running in hyp mode, writing into BRBCR_EL1
+	 * actually writes into BRBCR_EL2 instead. Field E2BRE
+	 * is also at the same position as E1BRE.
+	 */
+	if (branch_type & PERF_SAMPLE_BRANCH_KERNEL)
+		brbcr |= BRBCR_ELx_ExBRE;
+
+	if (branch_type & PERF_SAMPLE_BRANCH_HV) {
+		if (is_kernel_in_hyp_mode())
+			brbcr |= BRBCR_ELx_ExBRE;
+	}
+
+	if (!(branch_type & PERF_SAMPLE_BRANCH_NO_CYCLES))
+		brbcr |= BRBCR_ELx_CC;
+
+	if (!(branch_type & PERF_SAMPLE_BRANCH_NO_FLAGS))
+		brbcr |= BRBCR_ELx_MPRED;
+
+	/*
+	 * The exception and exception return branches could be
+	 * captured, irrespective of the perf event's privilege.
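+	 * For example, an event filtering for EL0 branches only can
+	 * still capture the exception branch taken on a syscall entry.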
+	 * If the perf event does not have enough privilege for a given
+	 * exception level, then addresses which fall under that exception
+	 * level will be reported as zero in the captured branch record,
+	 * creating source-only or target-only records.
+	 */
+	if (branch_type & PERF_SAMPLE_BRANCH_KERNEL) {
+		if (branch_type & PERF_SAMPLE_BRANCH_ANY) {
+			brbcr |= BRBCR_ELx_EXCEPTION;
+			brbcr |= BRBCR_ELx_ERTN;
+		}
+
+		if (branch_type & PERF_SAMPLE_BRANCH_ANY_CALL)
+			brbcr |= BRBCR_ELx_EXCEPTION;
+
+		if (branch_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+			brbcr |= BRBCR_ELx_ERTN;
+	}
+	return brbcr;
+}
+
+bool brbe_branch_attr_valid(struct perf_event *event)
+{
+	u64 branch_type = event->attr.branch_sample_type;
+
+	/*
+	 * Ensure both perf branch filter allowed and exclude
+	 * masks are always in sync with the generic perf ABI.
+	 */
+	BUILD_BUG_ON(BRBE_PERF_BRANCH_FILTERS != (PERF_SAMPLE_BRANCH_MAX - 1));
+
+	if (branch_type & BRBE_EXCLUDE_BRANCH_FILTERS) {
+		pr_debug("requested branch filter not supported 0x%llx\n", branch_type);
+		return false;
+	}
+
+	/* Ensure at least one branch type is enabled */
+	if (!(branch_type & BRBE_ALLOWED_BRANCH_TYPES)) {
+		pr_debug("no branch type enabled 0x%llx\n", branch_type);
+		return false;
+	}
+
+	/*
+	 * No branches are recorded in guests or nVHE hypervisors, so
+	 * excluding the host or both kernel and user is invalid.
+	 *
+	 * Ideally we'd just require exclude_guest and exclude_hv, but
+	 * setting event filters with perf for kernel or user doesn't set
+	 * exclude_guest. So effectively, exclude_guest and exclude_hv are
+	 * ignored.
+	 */
+	if (event->attr.exclude_host || (event->attr.exclude_user && event->attr.exclude_kernel)) {
+		pr_debug("branch filter in hypervisor or guest only not supported 0x%llx\n", branch_type);
+		return false;
+	}
+
+	event->hw.branch_reg.config = branch_type_to_brbfcr(event->attr.branch_sample_type);
+	event->hw.extra_reg.config = branch_type_to_brbcr(event->attr.branch_sample_type);
+
+	return true;
+}
+
+unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu)
+{
+	return FIELD_GET(BRBIDR0_EL1_NUMREC_MASK, armpmu->reg_brbidr);
+}
+
+void brbe_probe(struct arm_pmu *armpmu)
+{
+	u64 brbidr, aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+	u32 brbe;
+
+	brbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT);
+	if (!valid_brbe_version(brbe))
+		return;
+
+	brbidr = read_sysreg_s(SYS_BRBIDR0_EL1);
+	if (!valid_brbidr(brbidr))
+		return;
+
+	armpmu->reg_brbidr = brbidr;
+}
+
+/*
+ * BRBE is assumed to be disabled/paused on entry.
+ */
+void brbe_enable(const struct arm_pmu *arm_pmu)
+{
+	struct pmu_hw_events *cpuc = this_cpu_ptr(arm_pmu->hw_events);
+	u64 brbfcr = 0, brbcr = 0;
+
+	/*
+	 * Discard existing records to avoid a discontinuity, e.g. records
+	 * missed during handling an overflow.
+	 */
+	brbe_invalidate();
+
+	/*
+	 * Merge the permitted branch filters of all events.
+	 */
+	for (int i = 0; i < ARMPMU_MAX_HWEVENTS; i++) {
+		struct perf_event *event = cpuc->events[i];
+
+		if (event && has_branch_stack(event)) {
+			brbfcr |= event->hw.branch_reg.config;
+			brbcr |= event->hw.extra_reg.config;
+		}
+	}
+
+	/*
+	 * In VHE mode with MDCR_EL2.HPMN equal to PMCR_EL0.N, BRBCR_EL1.FZP
+	 * controls freezing the branch records on counter overflow rather
+	 * than BRBCR_EL2.FZP (which writes to BRBCR_EL1 are redirected to).
+	 * The exception levels are enabled/disabled in BRBCR_EL2, so keep
+	 * EL1 and EL0 recording disabled for guests.
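+	 * (In this mode the BRBCR_EL12 accessor below reaches the real
+	 * BRBCR_EL1, i.e. the register a guest sees as BRBCR_EL1.)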
+	 *
+	 * As BRBCR_EL1 CC and MPRED bits also need to match, use the same
+	 * value for both registers, just masking the exception levels.
+	 */
+	if (is_kernel_in_hyp_mode())
+		write_sysreg_s(brbcr & ~(BRBCR_ELx_ExBRE | BRBCR_ELx_E0BRE), SYS_BRBCR_EL12);
+	write_sysreg_s(brbcr, SYS_BRBCR_EL1);
+	/* Ensure BRBCR_ELx settings take effect before unpausing */
+	isb();
+
+	/* Finally write SYS_BRBFCR_EL1 to unpause BRBE */
+	write_sysreg_s(brbfcr, SYS_BRBFCR_EL1);
+	/* Synchronization in PMCR write ensures ordering WRT PMU enabling */
+}
+
+void brbe_disable(void)
+{
+	/*
+	 * No need for synchronization here as synchronization in PMCR write
+	 * ensures ordering and in the interrupt handler this is a NOP as
+	 * we're already paused.
+	 */
+	write_sysreg_s(BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
+	write_sysreg_s(0, SYS_BRBCR_EL1);
+}
+
+static const int brbe_type_to_perf_type_map[BRBINFx_EL1_TYPE_DEBUG_EXIT + 1][2] = {
+	[BRBINFx_EL1_TYPE_DIRECT_UNCOND]	= { PERF_BR_UNCOND,	0 },
+	[BRBINFx_EL1_TYPE_INDIRECT]		= { PERF_BR_IND,	0 },
+	[BRBINFx_EL1_TYPE_DIRECT_LINK]		= { PERF_BR_CALL,	0 },
+	[BRBINFx_EL1_TYPE_INDIRECT_LINK]	= { PERF_BR_IND_CALL,	0 },
+	[BRBINFx_EL1_TYPE_RET]			= { PERF_BR_RET,	0 },
+	[BRBINFx_EL1_TYPE_DIRECT_COND]		= { PERF_BR_COND,	0 },
+	[BRBINFx_EL1_TYPE_CALL]			= { PERF_BR_SYSCALL,	0 },
+	[BRBINFx_EL1_TYPE_ERET]			= { PERF_BR_ERET,	0 },
+	[BRBINFx_EL1_TYPE_IRQ]			= { PERF_BR_IRQ,	0 },
+	[BRBINFx_EL1_TYPE_TRAP]			= { PERF_BR_IRQ,	0 },
+	[BRBINFx_EL1_TYPE_SERROR]		= { PERF_BR_SERROR,	0 },
+	[BRBINFx_EL1_TYPE_ALIGN_FAULT]		= { PERF_BR_EXTEND_ABI,	PERF_BR_NEW_FAULT_ALGN },
+	[BRBINFx_EL1_TYPE_INSN_FAULT]		= { PERF_BR_EXTEND_ABI,	PERF_BR_NEW_FAULT_INST },
+	[BRBINFx_EL1_TYPE_DATA_FAULT]		= { PERF_BR_EXTEND_ABI,	PERF_BR_NEW_FAULT_DATA },
+};
+
+static void brbe_set_perf_entry_type(struct perf_branch_entry *entry, u64 brbinf)
+{
+	int brbe_type = brbinf_get_type(brbinf);
+
+	if (brbe_type <= BRBINFx_EL1_TYPE_DEBUG_EXIT) {
+		const int *br_type = brbe_type_to_perf_type_map[brbe_type];
+
+		entry->type = br_type[0];
+		entry->new_type = br_type[1];
+	}
+}
+
+static int brbinf_get_perf_priv(u64 brbinf)
+{
+	int brbe_el = brbinf_get_el(brbinf);
+
+	switch (brbe_el) {
+	case BRBINFx_EL1_EL_EL0:
+		return PERF_BR_PRIV_USER;
+	case BRBINFx_EL1_EL_EL1:
+		return PERF_BR_PRIV_KERNEL;
+	case BRBINFx_EL1_EL_EL2:
+		if (is_kernel_in_hyp_mode())
+			return PERF_BR_PRIV_KERNEL;
+		return PERF_BR_PRIV_HV;
+	default:
+		pr_warn_once("%d - unknown branch privilege captured\n", brbe_el);
+		return PERF_BR_PRIV_UNKNOWN;
+	}
+}
+
+static bool perf_entry_from_brbe_regset(int index, struct perf_branch_entry *entry,
+					const struct perf_event *event)
+{
+	struct brbe_regset bregs;
+	u64 brbinf;
+
+	if (!__read_brbe_regset(&bregs, index))
+		return false;
+
+	brbinf = bregs.brbinf;
+	perf_clear_branch_entry_bitfields(entry);
+	if (brbe_record_is_complete(brbinf)) {
+		entry->from = bregs.brbsrc;
+		entry->to = bregs.brbtgt;
+	} else if (brbe_record_is_source_only(brbinf)) {
+		entry->from = bregs.brbsrc;
+		entry->to = 0;
+	} else if (brbe_record_is_target_only(brbinf)) {
+		entry->from = 0;
+		entry->to = bregs.brbtgt;
+	}
+
+	brbe_set_perf_entry_type(entry, brbinf);
+
+	if (!branch_sample_no_cycles(event))
+		entry->cycles = brbinf_get_cycles(brbinf);
+
+	if (!branch_sample_no_flags(event)) {
+		/*
+		 * Mispredict info is available for source-only and complete
+		 * branch records.
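+		 * (A target-only record carries no source address, so its
+		 * mispredict state is unknown.)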
+		 */
+		if (!brbe_record_is_target_only(brbinf)) {
+			entry->mispred = brbinf_get_mispredict(brbinf);
+			entry->predicted = !entry->mispred;
+		}
+
+		/*
+		 * The TME feature is currently neither implemented in any
+		 * hardware nor supported in the kernel. Just warn once if
+		 * TME-related information shows up unexpectedly.
+		 */
+		if (brbinf_get_lastfailed(brbinf) || brbinf_get_in_tx(brbinf))
+			pr_warn_once("Unknown transaction states\n");
+	}
+
+	/*
+	 * Branch privilege level is available for target-only and complete
+	 * branch records.
+	 */
+	if (!brbe_record_is_source_only(brbinf))
+		entry->priv = brbinf_get_perf_priv(brbinf);
+
+	return true;
+}
+
+#define PERF_BR_ARM64_ALL (			\
+	BIT(PERF_BR_COND) |			\
+	BIT(PERF_BR_UNCOND) |			\
+	BIT(PERF_BR_IND) |			\
+	BIT(PERF_BR_CALL) |			\
+	BIT(PERF_BR_IND_CALL) |			\
+	BIT(PERF_BR_RET))
+
+#define PERF_BR_ARM64_ALL_KERNEL (		\
+	BIT(PERF_BR_SYSCALL) |			\
+	BIT(PERF_BR_IRQ) |			\
+	BIT(PERF_BR_SERROR) |			\
+	BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_ALGN) |	\
+	BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_DATA) |	\
+	BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_INST))
+
+static void prepare_event_branch_type_mask(u64 branch_sample,
+					   unsigned long *event_type_mask)
+{
+	if (branch_sample & PERF_SAMPLE_BRANCH_ANY) {
+		if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+			bitmap_from_u64(event_type_mask,
+					BIT(PERF_BR_ERET) | PERF_BR_ARM64_ALL |
+					PERF_BR_ARM64_ALL_KERNEL);
+		else
+			bitmap_from_u64(event_type_mask, PERF_BR_ARM64_ALL);
+		return;
+	}
+
+	bitmap_zero(event_type_mask, PERF_BR_ARM64_MAX);
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_ANY_CALL) {
+		if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+			bitmap_from_u64(event_type_mask, PERF_BR_ARM64_ALL_KERNEL);
+
+		set_bit(PERF_BR_CALL, event_type_mask);
+		set_bit(PERF_BR_IND_CALL, event_type_mask);
+	}
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_IND_JUMP)
+		set_bit(PERF_BR_IND, event_type_mask);
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_COND)
+		set_bit(PERF_BR_COND, event_type_mask);
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_CALL)
+		set_bit(PERF_BR_CALL, event_type_mask);
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_IND_CALL)
+		set_bit(PERF_BR_IND_CALL, event_type_mask);
+
+	if (branch_sample & PERF_SAMPLE_BRANCH_ANY_RETURN) {
+		set_bit(PERF_BR_RET, event_type_mask);
+
+		if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+			set_bit(PERF_BR_ERET, event_type_mask);
+	}
+}
+
+/*
+ * BRBE is configured with an OR of permissions from all events, so there may
+ * be events which have to be dropped or events where just the source or target
+ * address has to be zeroed.
+ */
+static bool filter_branch_privilege(struct perf_branch_entry *entry, u64 branch_sample_type)
+{
+	bool from_user = access_ok((void __user *)(unsigned long)entry->from, 4);
+	bool to_user = access_ok((void __user *)(unsigned long)entry->to, 4);
+	bool exclude_kernel = !((branch_sample_type & PERF_SAMPLE_BRANCH_KERNEL) ||
+				(is_kernel_in_hyp_mode() && (branch_sample_type & PERF_SAMPLE_BRANCH_HV)));
+
+	/* We can only have a half record if permissions have not been expanded */
+	if (!entry->from || !entry->to)
+		return true;
+
+	/*
+	 * If the record is within a single exception level, we just need to
+	 * either drop or keep the entire record.
+	 */
+	if (from_user == to_user)
+		return ((entry->priv == PERF_BR_PRIV_KERNEL) && !exclude_kernel) ||
+			((entry->priv == PERF_BR_PRIV_USER) &&
+			 (branch_sample_type & PERF_SAMPLE_BRANCH_USER));
+
+	/*
+	 * The record is across exception levels; mask the addresses for the
+	 * exception level we're not capturing.
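+	 * E.g. for a user-space-only event, an exception return from the
+	 * kernel to user space keeps the user-space target address while
+	 * the kernel-side source address is zeroed.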
+ */ + if (!(branch_sample_type & PERF_SAMPLE_BRANCH_USER)) { + if (from_user) + entry->from = 0; + if (to_user) + entry->to = 0; + } + + if (exclude_kernel) { + if (!from_user) + entry->from = 0; + if (!to_user) + entry->to = 0; + } + + return true; +} + +static bool filter_branch_type(struct perf_branch_entry *entry, + const unsigned long *event_type_mask) +{ + if (entry->type == PERF_BR_EXTEND_ABI) + return test_bit(PERF_BR_MAX + entry->new_type, event_type_mask); + else + return test_bit(entry->type, event_type_mask); +} + +static bool filter_branch_record(struct perf_branch_entry *entry, + u64 branch_sample, + const unsigned long *event_type_mask) +{ + return filter_branch_type(entry, event_type_mask) && + filter_branch_privilege(entry, branch_sample); +} + +void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack, + const struct perf_event *event) +{ + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + int nr_hw = brbe_num_branch_records(cpu_pmu); + int nr_banks = DIV_ROUND_UP(nr_hw, BRBE_BANK_MAX_ENTRIES); + int nr_filtered = 0; + u64 branch_sample_type = event->attr.branch_sample_type; + DECLARE_BITMAP(event_type_mask, PERF_BR_ARM64_MAX); + + prepare_event_branch_type_mask(branch_sample_type, event_type_mask); + + for (int bank = 0; bank < nr_banks; bank++) { + int nr_remaining = nr_hw - (bank * BRBE_BANK_MAX_ENTRIES); + int nr_this_bank = min(nr_remaining, BRBE_BANK_MAX_ENTRIES); + + select_brbe_bank(bank); + + for (int i = 0; i < nr_this_bank; i++) { + struct perf_branch_entry *pbe = &branch_stack->entries[nr_filtered]; + + if (!perf_entry_from_brbe_regset(i, pbe, event)) + goto done; + + if (!filter_branch_record(pbe, branch_sample_type, event_type_mask)) + continue; + + nr_filtered++; + } + } + +done: + branch_stack->nr = nr_filtered; +} diff --git a/drivers/perf/arm_brbe.h b/drivers/perf/arm_brbe.h new file mode 100644 index 000000000000..b7c7d8796c86 --- /dev/null +++ b/drivers/perf/arm_brbe.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Branch Record Buffer Extension Helpers. 
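+ *
+ * When CONFIG_ARM64_BRBE is not set, the static inline stubs below keep
+ * the callers in arm_pmuv3.c building without #ifdefs.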
+ *
+ * Copyright (C) 2022-2025 ARM Limited
+ *
+ * Author: Anshuman Khandual
+ */
+
+struct arm_pmu;
+struct perf_branch_stack;
+struct perf_event;
+
+#ifdef CONFIG_ARM64_BRBE
+void brbe_probe(struct arm_pmu *arm_pmu);
+unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu);
+void brbe_invalidate(void);
+
+void brbe_enable(const struct arm_pmu *arm_pmu);
+void brbe_disable(void);
+
+bool brbe_branch_attr_valid(struct perf_event *event);
+void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack,
+				const struct perf_event *event);
+#else
+static inline void brbe_probe(struct arm_pmu *arm_pmu) { }
+static inline unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu)
+{
+	return 0;
+}
+
+static inline void brbe_invalidate(void) { }
+
+static inline void brbe_enable(const struct arm_pmu *arm_pmu) { }
+static inline void brbe_disable(void) { }
+
+static inline bool brbe_branch_attr_valid(struct perf_event *event)
+{
+	WARN_ON_ONCE(!has_branch_stack(event));
+	return false;
+}
+
+static inline void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack,
+					      const struct perf_event *event)
+{
+}
+#endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9f0d32136433..a7352f3fdf1c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -99,7 +99,7 @@ static const struct pmu_irq_ops percpu_pmunmi_ops = {
 	.free_pmuirq = armpmu_free_percpu_pmunmi
 };
 
-static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
 
@@ -318,6 +318,12 @@ armpmu_del(struct perf_event *event, int flags)
 	int idx = hwc->idx;
 
 	armpmu_stop(event, PERF_EF_UPDATE);
+
+	if (has_branch_stack(event)) {
+		hw_events->branch_users--;
+		perf_sched_cb_dec(event->pmu);
+	}
+
 	hw_events->events[idx] = NULL;
 	armpmu->clear_event_idx(hw_events, event);
 	perf_event_update_userpage(event);
@@ -345,6 +351,11 @@ armpmu_add(struct perf_event *event, int flags)
 	/* The newly-allocated counter should be empty */
 	WARN_ON_ONCE(hw_events->events[idx]);
 
+	if (has_branch_stack(event)) {
+		hw_events->branch_users++;
+		perf_sched_cb_inc(event->pmu);
+	}
+
 	event->hw.idx = idx;
 	hw_events->events[idx] = event;
 
@@ -509,8 +520,7 @@ static int armpmu_event_init(struct perf_event *event)
 	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
 		return -ENOENT;
 
-	/* does not support taken branch sampling */
-	if (has_branch_stack(event))
+	if (has_branch_stack(event) && !armpmu->reg_brbidr)
 		return -EOPNOTSUPP;
 
 	return __hw_perf_event_init(event);
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 2acbd02816a4..0976251708a3 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -25,6 +25,8 @@
 #include
 #include
 
+#include "arm_brbe.h"
+
 /* ARMv8 Cortex-A53 specific event types.
*/ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 @@ -438,7 +440,19 @@ static ssize_t threshold_max_show(struct device *dev, static DEVICE_ATTR_RO(threshold_max); +static ssize_t branches_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + + return sysfs_emit(page, "%d\n", brbe_num_branch_records(cpu_pmu)); +} + +static DEVICE_ATTR_RO(branches); + static struct attribute *armv8_pmuv3_caps_attrs[] = { + &dev_attr_branches.attr, &dev_attr_slots.attr, &dev_attr_bus_slots.attr, &dev_attr_bus_width.attr, @@ -446,9 +460,22 @@ static struct attribute *armv8_pmuv3_caps_attrs[] = { NULL, }; +static umode_t caps_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + + if (i == 0) + return brbe_num_branch_records(cpu_pmu) ? attr->mode : 0; + + return attr->mode; +} + static const struct attribute_group armv8_pmuv3_caps_attr_group = { .name = "caps", .attrs = armv8_pmuv3_caps_attrs, + .is_visible = caps_is_visible, }; /* @@ -800,6 +827,7 @@ static void armv8pmu_disable_event(struct perf_event *event) static void armv8pmu_start(struct arm_pmu *cpu_pmu) { struct perf_event_context *ctx; + struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events); int nr_user = 0; ctx = perf_cpu_task_ctx(); @@ -813,16 +841,34 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu) host_kvm_vcpu_pmu_resync_el0(); + if (hw_events->branch_users) + brbe_enable(cpu_pmu); + /* Enable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); } static void armv8pmu_stop(struct arm_pmu *cpu_pmu) { + struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events); + + if (hw_events->branch_users) + brbe_disable(); + /* Disable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); } +static void read_branch_records(struct pmu_hw_events *cpuc, + struct perf_event *event, + struct perf_sample_data *data) +{ + struct perf_branch_stack *branch_stack = cpuc->branch_stack; + + brbe_read_filtered_entries(branch_stack, event); + perf_sample_save_brstack(data, event, branch_stack, NULL); +} + static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) { u64 pmovsr; @@ -873,6 +919,9 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; + if (has_branch_stack(event)) + read_branch_records(cpuc, event, &data); + /* * Perf event overflow will queue the processing of the event as * an irq_work which will be taken care of in the handling of @@ -930,7 +979,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* Always prefer to place a cycle counter into the cycle counter. 
*/ if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && - !armv8pmu_event_get_threshold(&event->attr)) { + !armv8pmu_event_get_threshold(&event->attr) && !has_branch_stack(event)) { if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask)) return ARMV8_PMU_CYCLE_IDX; else if (armv8pmu_event_is_64bit(event) && @@ -979,6 +1028,19 @@ static int armv8pmu_user_event_idx(struct perf_event *event) return event->hw.idx + 1; } +static void armv8pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) +{ + struct arm_pmu *armpmu = *this_cpu_ptr(&cpu_armpmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + + if (!hw_events->branch_users) + return; + + if (sched_in) + brbe_invalidate(); +} + /* * Add an event filter to a given event. */ @@ -996,6 +1058,13 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, return -EOPNOTSUPP; } + if (has_branch_stack(perf_event)) { + if (!brbe_num_branch_records(cpu_pmu) || !brbe_branch_attr_valid(perf_event)) + return -EOPNOTSUPP; + + perf_event->attach_state |= PERF_ATTACH_SCHED_CB; + } + /* * If we're running in hyp mode, then we *are* the hypervisor. * Therefore we ignore exclude_hv in this configuration, since @@ -1062,6 +1131,11 @@ static void armv8pmu_reset(void *info) /* Clear the counters we flip at guest entry/exit */ host_kvm_clr_pmu_events(mask); + if (brbe_num_branch_records(cpu_pmu)) { + brbe_disable(); + brbe_invalidate(); + } + /* * Initialize & Reset PMNC. Request overflow interrupt for * 64 bit cycle counter but cheat in armv8pmu_write_counter(). @@ -1230,6 +1304,25 @@ static void __armv8pmu_probe_pmu(void *info) cpu_pmu->reg_pmmir = read_pmmir(); else cpu_pmu->reg_pmmir = 0; + + brbe_probe(cpu_pmu); +} + +static int branch_records_alloc(struct arm_pmu *armpmu) +{ + size_t size = struct_size_t(struct perf_branch_stack, entries, + brbe_num_branch_records(armpmu)); + int cpu; + + for_each_cpu(cpu, &armpmu->supported_cpus) { + struct pmu_hw_events *events_cpu; + + events_cpu = per_cpu_ptr(armpmu->hw_events, cpu); + events_cpu->branch_stack = kmalloc(size, GFP_KERNEL); + if (!events_cpu->branch_stack) + return -ENOMEM; + } + return 0; } static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) @@ -1246,7 +1339,15 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) if (ret) return ret; - return probe.present ? 
0 : -ENODEV;
+	if (!probe.present)
+		return -ENODEV;
+
+	if (brbe_num_branch_records(cpu_pmu)) {
+		ret = branch_records_alloc(cpu_pmu);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
 static void armv8pmu_disable_user_access_ipi(void *unused)
@@ -1309,6 +1410,8 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
 	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
 
 	cpu_pmu->pmu.event_idx		= armv8pmu_user_event_idx;
+	if (brbe_num_branch_records(cpu_pmu))
+		cpu_pmu->pmu.sched_task = armv8pmu_sched_task;
 
 	cpu_pmu->name			= name;
 	cpu_pmu->map_event		= map_event;
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 62093f5c342e..1145af46d84c 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -76,6 +76,11 @@ struct pmu_hw_events {
 	struct arm_pmu		*percpu_pmu;
 
 	int irq;
+
+	struct perf_branch_stack	*branch_stack;
+
+	/* Active events requesting branch records */
+	unsigned int		branch_users;
 };
 
 enum armpmu_attr_groups {
@@ -117,6 +122,7 @@ struct arm_pmu {
 	/* PMUv3 only */
 	int		pmuver;
 	u64		reg_pmmir;
+	u64		reg_brbidr;
 #define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40
 	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 #define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE	0x4000
@@ -128,6 +134,8 @@ struct arm_pmu {
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
+DECLARE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+
 u64 armpmu_event_update(struct perf_event *event);
 int armpmu_event_set_period(struct perf_event *event);
 
-- 
Gitee

From 5962eb396d3d429cc556af2d818cb080385412fc Mon Sep 17 00:00:00 2001
From: Wei Chen
Date: Mon, 15 Dec 2025 14:21:42 +0800
Subject: [PATCH 75/75] anolis: arm64: enable CONFIG_ARM64_BRBE=y

ANBZ: #25252

Enable CONFIG_ARM64_BRBE=y on arm64 to support the Branch Record Buffer
Extension (BRBE).

Signed-off-by: Wei Chen
---
 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_BRBE | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_BRBE

diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_BRBE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_BRBE
new file mode 100644
index 000000000000..15184e522c60
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_BRBE
@@ -0,0 +1 @@
+CONFIG_ARM64_BRBE=y
-- 
Gitee