From 7c1c1b37a998376805d99a51e207c7a976014cac Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Wed, 10 Mar 2021 09:43:20 +0100
Subject: [PATCH 1/5] x86/boot/compressed/64: Reload CS in startup_32

ANBZ: #5983

commit 0c289ff81c24033777fab23019039f11e1449ba4 upstream.

Exception handling in the startup_32 boot path requires the CS selector
to be correctly set up. Reload it from the current GDT.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210312123824.306-4-joro@8bytes.org
Signed-off-by: hanliyang
---
 arch/x86/boot/compressed/head_64.S | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 3daf411101dc..31cbf1e49139 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -107,9 +107,16 @@ SYM_FUNC_START(startup_32)
 	movl	%eax, %gs
 	movl	%eax, %ss
 
-/* setup a stack and make sure cpu supports long mode. */
+	/* Setup a stack and load CS from current GDT */
 	leal	rva(boot_stack_end)(%ebp), %esp
 
+	pushl	$__KERNEL32_CS
+	leal	rva(1f)(%ebp), %eax
+	pushl	%eax
+	lretl
+1:
+
+	/* Make sure cpu supports long mode. */
 	call	verify_cpu
 	testl	%eax, %eax
 	jnz	.Lno_longmode
-- 
Gitee
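
The far-return sequence above is the usual way to reload CS in 32-bit code,
since CS cannot be written with a plain mov: push the new selector, push a
return address, and let lretl pop both. The target address is computed with
rva()/%ebp because the decompressor is not yet running at its link address.
For illustration only, the same idiom as a GNU C inline-assembly sketch for a
32-bit build (the helper name is made up and does not exist in the patch):

static inline void reload_cs32(unsigned long new_cs)
{
	/* lretl pops EIP first, then CS, so push them in reverse order. */
	asm volatile("pushl	%0\n\t"
		     "pushl	$1f\n\t"
		     "lretl\n\t"
		     "1:"
		     : : "r" (new_cs) : "memory");
}
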

From c1b0e7f1817f6d4a1bf7e2d3caf5c0482dfc1830 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Wed, 10 Mar 2021 09:43:21 +0100
Subject: [PATCH 2/5] x86/boot/compressed/64: Setup IDT in startup_32 boot path

ANBZ: #5983

commit 79419e13e8082cc15d174df979a363528e31f2e7 upstream.

This boot path needs exception handling when it is used with SEV-ES.
Setup an IDT and provide a helper function to write IDT entries for
use in 32-bit protected mode.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210312123824.306-5-joro@8bytes.org
[ hanliyang: Fix conflict in arch/x86/boot/compressed/head_64.S ]
Signed-off-by: hanliyang
---
 arch/x86/boot/compressed/head_64.S | 72 ++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 31cbf1e49139..8ec06aef8d28 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -116,6 +116,9 @@ SYM_FUNC_START(startup_32)
 	lretl
 1:
 
+	/* Setup Exception handling for SEV-ES */
+	call	startup32_load_idt
+
 	/* Make sure cpu supports long mode. */
 	call	verify_cpu
 	testl	%eax, %eax
@@ -731,6 +734,19 @@ SYM_DATA_START(boot_idt)
 	.endr
 SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+SYM_DATA_START(boot32_idt_desc)
+	.word	boot32_idt_end - boot32_idt - 1
+	.long	0
+SYM_DATA_END(boot32_idt_desc)
+	.balign 8
+SYM_DATA_START(boot32_idt)
+	.rept	32
+	.quad	0
+	.endr
+SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
+#endif
+
 #ifdef CONFIG_EFI_STUB
 SYM_DATA(image_offset, .long 0)
 #endif
@@ -895,6 +911,62 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	RET
 SYM_FUNC_END(startup32_check_sev_cbit)
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	__HEAD
+	.code32
+/*
+ * Write an IDT entry into boot32_idt
+ *
+ * Parameters:
+ *
+ * %eax:	Handler address
+ * %edx:	Vector number
+ *
+ * Physical offset is expected in %ebp
+ */
+SYM_FUNC_START(startup32_set_idt_entry)
+	push	%ebx
+	push	%ecx
+
+	/* IDT entry address to %ebx */
+	leal	rva(boot32_idt)(%ebp), %ebx
+	shl	$3, %edx
+	addl	%edx, %ebx
+
+	/* Build IDT entry, lower 4 bytes */
+	movl	%eax, %edx
+	andl	$0x0000ffff, %edx	# Target code segment offset [15:0]
+	movl	$__KERNEL32_CS, %ecx	# Target code segment selector
+	shl	$16, %ecx
+	orl	%ecx, %edx
+
+	/* Store lower 4 bytes to IDT */
+	movl	%edx, (%ebx)
+
+	/* Build IDT entry, upper 4 bytes */
+	movl	%eax, %edx
+	andl	$0xffff0000, %edx	# Target code segment offset [31:16]
+	orl	$0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate
+
+	/* Store upper 4 bytes to IDT */
+	movl	%edx, 4(%ebx)
+
+	pop	%ecx
+	pop	%ebx
+	ret
+SYM_FUNC_END(startup32_set_idt_entry)
+#endif
+
+SYM_FUNC_START(startup32_load_idt)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/* Load IDT */
+	leal	rva(boot32_idt)(%ebp), %eax
+	movl	%eax, rva(boot32_idt_desc+2)(%ebp)
+	lidt	rva(boot32_idt_desc)(%ebp)
+#endif
+	ret
+SYM_FUNC_END(startup32_load_idt)
+
 /*
  * Stack and heap for uncompression
  */
-- 
Gitee
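
The constants in startup32_set_idt_entry encode a 32-bit interrupt gate: the
low dword holds the code segment selector in bits 31:16 and handler offset
bits 15:0, the high dword holds offset bits 31:16 plus the present bit, DPL 0
and gate type 0xE (hence 0x8e00); boot32_idt_desc stores "size - 1" as the
limit and gets its base patched in by startup32_load_idt before lidt. A
minimal C sketch of the gate layout, for illustration only (struct and
function names are made up, not from the kernel sources):

#include <stdint.h>

struct idt_gate32 {
	uint32_t lo;	/* selector << 16 | handler offset[15:0]              */
	uint32_t hi;	/* offset[31:16] | 0x8e00 (present, 32-bit int gate)  */
};

static struct idt_gate32 make_int_gate32(uint32_t handler, uint16_t selector)
{
	struct idt_gate32 g;

	g.lo = ((uint32_t)selector << 16) | (handler & 0x0000ffffu);
	g.hi = (handler & 0xffff0000u) | 0x00008e00u;
	return g;
}
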

From f2a29711e47c4b0b923e869acdc8cdbe74c30574 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Wed, 10 Mar 2021 09:43:22 +0100
Subject: [PATCH 3/5] x86/boot/compressed/64: Add 32-bit boot #VC handler

ANBZ: #5983

commit 1ccdbf748d862bc2ea106fa9f2300983c77860fe upstream.

Add a #VC exception handler which is used when the kernel still executes
in protected mode. This boot-path already uses CPUID, which will cause #VC
exceptions in an SEV-ES guest.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210312123824.306-6-joro@8bytes.org
[ hanliyang: Fix conflicts in arch/x86/boot/compressed ]
Signed-off-by: hanliyang
---
 arch/x86/boot/compressed/head_64.S     |  6 ++
 arch/x86/boot/compressed/mem_encrypt.S | 95 ++++++++++++++++++++++++++
 2 files changed, 101 insertions(+)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 8ec06aef8d28..5b57f374bc5c 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -34,6 +34,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 #include <asm/desc_defs.h>
+#include <asm/trapnr.h>
 #include "pgtable.h"
 
 /*
@@ -959,6 +960,11 @@ SYM_FUNC_END(startup32_set_idt_entry)
 
 SYM_FUNC_START(startup32_load_idt)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+	/* #VC handler */
+	leal	rva(startup32_vc_handler)(%ebp), %eax
+	movl	$X86_TRAP_VC, %edx
+	call	startup32_set_idt_entry
+
 	/* Load IDT */
 	leal	rva(boot32_idt)(%ebp), %eax
 	movl	%eax, rva(boot32_idt_desc+2)(%ebp)
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index 484a9c06f41d..58a29fa70ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -61,6 +61,101 @@ SYM_FUNC_START(get_sev_encryption_bit)
 	RET
 SYM_FUNC_END(get_sev_encryption_bit)
 
+/**
+ * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
+ *		      the GHCB MSR protocol
+ *
+ * @%eax:	Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
+ * @%edx:	CPUID Function
+ *
+ * Returns 0 in %eax on success, non-zero on failure
+ * %edx returns CPUID value on success
+ */
+SYM_CODE_START_LOCAL(sev_es_req_cpuid)
+	shll	$30, %eax
+	orl	$0x00000004, %eax
+	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
+	wrmsr
+	rep; vmmcall		# VMGEXIT
+	rdmsr
+
+	/* Check response */
+	movl	%eax, %ecx
+	andl	$0x3ffff000, %ecx	# Bits [12-29] MBZ
+	jnz	2f
+
+	/* Check return code */
+	andl	$0xfff, %eax
+	cmpl	$5, %eax
+	jne	2f
+
+	/* All good - return success */
+	xorl	%eax, %eax
+1:
+	ret
+2:
+	movl	$-1, %eax
+	jmp	1b
+SYM_CODE_END(sev_es_req_cpuid)
+
+SYM_CODE_START(startup32_vc_handler)
+	pushl	%eax
+	pushl	%ebx
+	pushl	%ecx
+	pushl	%edx
+
+	/* Keep CPUID function in %ebx */
+	movl	%eax, %ebx
+
+	/* Check if error-code == SVM_EXIT_CPUID */
+	cmpl	$0x72, 16(%esp)
+	jne	.Lfail
+
+	movl	$0, %eax		# Request CPUID[fn].EAX
+	movl	%ebx, %edx		# CPUID fn
+	call	sev_es_req_cpuid	# Call helper
+	testl	%eax, %eax		# Check return code
+	jnz	.Lfail
+	movl	%edx, 12(%esp)		# Store result
+
+	movl	$1, %eax		# Request CPUID[fn].EBX
+	movl	%ebx, %edx		# CPUID fn
+	call	sev_es_req_cpuid	# Call helper
+	testl	%eax, %eax		# Check return code
+	jnz	.Lfail
+	movl	%edx, 8(%esp)		# Store result
+
+	movl	$2, %eax		# Request CPUID[fn].ECX
+	movl	%ebx, %edx		# CPUID fn
+	call	sev_es_req_cpuid	# Call helper
+	testl	%eax, %eax		# Check return code
+	jnz	.Lfail
+	movl	%edx, 4(%esp)		# Store result
+
+	movl	$3, %eax		# Request CPUID[fn].EDX
+	movl	%ebx, %edx		# CPUID fn
+	call	sev_es_req_cpuid	# Call helper
+	testl	%eax, %eax		# Check return code
+	jnz	.Lfail
+	movl	%edx, 0(%esp)		# Store result
+
+	popl	%edx
+	popl	%ecx
+	popl	%ebx
+	popl	%eax
+
+	/* Remove error code */
+	addl	$4, %esp
+
+	/* Jump over CPUID instruction */
+	addl	$2, (%esp)
+
+	iret
+.Lfail:
+	hlt
+	jmp .Lfail
+SYM_CODE_END(startup32_vc_handler)
+
 	.code64
 
 #include "../../kernel/sev_verify_cbit.S"
-- 
Gitee
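
The helper sev_es_req_cpuid above speaks the GHCB MSR protocol through
MSR_AMD64_SEV_ES_GHCB (0xc0010130): bits 11:0 carry the request code (4 for a
CPUID request), bits 31:30 select which CPUID output register is wanted, and
bits 63:32 carry the CPUID function; the response uses code 5, requires bits
29:12 to be zero and returns the value in bits 63:32. A small C sketch of
building and parsing those MSR values, for illustration only (all names here
are made up, not kernel APIs):

#include <stdint.h>

#define GHCB_MSR_CPUID_REQ	0x004ULL	/* request code in GHCBData[11:0] */
#define GHCB_MSR_CPUID_RESP	0x005ULL	/* response code expected back    */

static uint64_t ghcb_cpuid_request(uint32_t reg, uint32_t fn)
{
	return GHCB_MSR_CPUID_REQ |
	       ((uint64_t)(reg & 3) << 30) |	/* 0=EAX, 1=EBX, 2=ECX, 3=EDX */
	       ((uint64_t)fn << 32);		/* CPUID function/leaf        */
}

static int ghcb_cpuid_response(uint64_t msr, uint32_t *value)
{
	if (msr & 0x3ffff000ULL)		/* bits 29:12 must be zero   */
		return -1;
	if ((msr & 0xfffULL) != GHCB_MSR_CPUID_RESP)
		return -1;
	*value = (uint32_t)(msr >> 32);		/* result in GHCBData[63:32] */
	return 0;
}
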

From da4aec7e2b1b16fe4e855b9535f275078e8f1298 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Fri, 12 Mar 2021 13:38:22 +0100
Subject: [PATCH 4/5] x86/boot/compressed/64: Add CPUID sanity check to 32-bit
 boot-path

ANBZ: #5983

commit e927e62d8e370ebfc0d702fec22bc752249ebcef upstream.

The 32-bit #VC handler has no GHCB and can only handle CPUID exit codes.
It is needed by the early boot code to handle #VC exceptions raised in
verify_cpu() and to get the position of the C-bit.

But the CPUID information comes from the hypervisor which is untrusted
and might return results which trick the guest into the no-SEV boot path
with no C-bit set in the page-tables. All data written to memory would
then be unencrypted and could leak sensitive data to the hypervisor.

Add sanity checks to the 32-bit boot #VC handler to make sure the
hypervisor does not pretend that SEV is not enabled.

Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210312123824.306-7-joro@8bytes.org
Signed-off-by: hanliyang
---
 arch/x86/boot/compressed/mem_encrypt.S | 28 ++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index 58a29fa70ed8..2aa751fee335 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -139,6 +139,26 @@ SYM_CODE_START(startup32_vc_handler)
 	jnz	.Lfail
 	movl	%edx, 0(%esp)		# Store result
 
+	/*
+	 * Sanity check CPUID results from the Hypervisor. See comment in
+	 * do_vc_no_ghcb() for more details on why this is necessary.
+	 */
+
+	/* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
+	cmpl	$0x80000000, %ebx
+	jne	.Lcheck_sev
+	cmpl	$0x8000001f, 12(%esp)
+	jb	.Lfail
+	jmp	.Ldone
+
+.Lcheck_sev:
+	/* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
+	cmpl	$0x8000001f, %ebx
+	jne	.Ldone
+	btl	$1, 12(%esp)
+	jnc	.Lfail
+
+.Ldone:
 	popl	%edx
 	popl	%ecx
 	popl	%ebx
@@ -152,6 +172,14 @@ SYM_CODE_START(startup32_vc_handler)
 
 	iret
 .Lfail:
+	/* Send terminate request to Hypervisor */
+	movl	$0x100, %eax
+	xorl	%edx, %edx
+	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
+	wrmsr
+	rep; vmmcall
+
+	/* If request fails, go to hlt loop */
 	hlt
 	jmp .Lfail
 SYM_CODE_END(startup32_vc_handler)
-- 
Gitee
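
The check added above boils down to two conditions on the hypervisor-supplied
values: CPUID 0x80000000 must report a highest extended leaf of at least
0x8000001f, and CPUID 0x8000001f must have the SEV bit (EAX bit 1) set;
anything else means the hypervisor is trying to hide SEV and the guest sends
a terminate request. The same logic as a C sketch, for illustration only (the
function name is made up):

#include <stdint.h>
#include <stdbool.h>

static bool sev_cpuid_result_plausible(uint32_t fn, uint32_t eax)
{
	if (fn == 0x80000000)			/* highest extended leaf     */
		return eax >= 0x8000001f;
	if (fn == 0x8000001f)			/* SEV feature leaf          */
		return eax & (1u << 1);		/* bit 1: SEV supported      */
	return true;				/* other leaves not checked  */
}
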

From f63407e2c08983e266dd8ad51d0f3c3e427374cc Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Fri, 20 Aug 2021 14:57:03 +0200
Subject: [PATCH 5/5] x86/efi: Restore Firmware IDT before calling
 ExitBootServices()

ANBZ: #5983

commit 22aa45cb465be474e97666b3f7587ccb06ee411b upstream.

Commit 79419e13e808 ("x86/boot/compressed/64: Setup IDT in startup_32
boot path") introduced an IDT into the 32-bit boot path of the
decompressor stub. But the IDT is set up before ExitBootServices() is
called, and some UEFI firmwares rely on their own IDT.

Save the firmware IDT on boot and restore it before calling into EFI
functions to fix boot failures introduced by above commit.

Fixes: 79419e13e808 ("x86/boot/compressed/64: Setup IDT in startup_32 boot path")
Reported-by: Fabio Aiuto
Signed-off-by: Joerg Roedel
Signed-off-by: Borislav Petkov
Acked-by: Ard Biesheuvel
Cc: stable@vger.kernel.org # 5.13+
Link: https://lkml.kernel.org/r/20210820125703.32410-1-joro@8bytes.org
[ hanliyang: Fix comments in arch/x86/boot/compressed/efi_thunk_64.S ]
Signed-off-by: hanliyang
---
 arch/x86/boot/compressed/efi_thunk_64.S | 30 +++++++++++++++++--------
 arch/x86/boot/compressed/head_64.S      |  3 +++
 2 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index c8052a141b08..70052779b235 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -5,9 +5,8 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI serivce calls,
- * since the firmware's 32-bit IDT is still currently installed and it
- * needs to be able to service interrupts.
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
  *
  * On the plus side, we don't have to worry about mangling 64-bit
  * addresses into 32-bits because we're executing with an identity
@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
 	/*
 	 * Convert x86-64 ABI params to i386 ABI
 	 */
-	subq	$32, %rsp
+	subq	$64, %rsp
 	movl	%esi, 0x0(%rsp)
 	movl	%edx, 0x4(%rsp)
 	movl	%ecx, 0x8(%rsp)
@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
 	leaq	0x14(%rsp), %rbx
 	sgdt	(%rbx)
 
+	addq	$16, %rbx
+	sidt	(%rbx)
+
 	/*
-	 * Switch to gdt with 32-bit segments. This is the firmware GDT
-	 * that was installed when the kernel started executing. This
-	 * pointer was saved at the EFI stub entry point in head_64.S.
+	 * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
+	 * and IDT that was installed when the kernel started executing. The
+	 * pointers were saved at the EFI stub entry point in head_64.S.
 	 *
 	 * Pass the saved DS selector to the 32-bit code, and use far return to
 	 * restore the saved CS selector.
 	 */
+	leaq	efi32_boot_idt(%rip), %rax
+	lidt	(%rax)
 	leaq	efi32_boot_gdt(%rip), %rax
 	lgdt	(%rax)
 
@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
 	pushq	%rax
 	lretq
 
-1:	addq	$32, %rsp
+1:	addq	$64, %rsp
 	movq	%rdi, %rax
 
 	pop	%rbx
@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
 
 	/*
 	 * Some firmware will return with interrupts enabled. Be sure to
-	 * disable them before we switch GDTs.
+	 * disable them before we switch GDTs and IDTs.
	 */
 	cli
 
+	lidtl	(%ebx)
+	subl	$16, %ebx
+
 	lgdtl	(%ebx)
 
 	movl	%cr4, %eax
@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
 	.quad	0
 SYM_DATA_END(efi32_boot_gdt)
 
+SYM_DATA_START(efi32_boot_idt)
+	.word	0
+	.quad	0
+SYM_DATA_END(efi32_boot_idt)
+
 SYM_DATA_START(efi32_boot_cs)
 	.word	0
 SYM_DATA_END(efi32_boot_cs)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 5b57f374bc5c..527ecaa8c27e 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -319,6 +319,9 @@ SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
 	movw	%cs, rva(efi32_boot_cs)(%ebp)
 	movw	%ds, rva(efi32_boot_ds)(%ebp)
 
+	/* Store firmware IDT descriptor */
+	sidtl	rva(efi32_boot_idt)(%ebp)
+
 	/* Disable paging */
 	movl	%cr0, %eax
 	btrl	$X86_CR0_PG_BIT, %eax
-- 
Gitee
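
Both the sgdt/sidt pair in __efi64_thunk and the efi32_boot_gdt/efi32_boot_idt
variables store a pseudo-descriptor: a 16-bit limit followed by the base
address (up to 8 bytes in long mode), which is why the scratch area grows from
32 to 64 bytes and the two saved descriptors sit 16 bytes apart on the stack,
above the 32-bit call parameters at the bottom of the area. A C view of that
layout, for illustration only (the struct name is made up; the kernel's own
equivalent is struct desc_ptr):

#include <stdint.h>

struct pseudo_descriptor {
	uint16_t limit;		/* size of the GDT or IDT minus one      */
	uint64_t base;		/* linear base address of the table      */
} __attribute__((packed));	/* 10 bytes; stored 16 bytes apart here  */
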