diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 47dafd6ab3a30abd5aef575a34950abca498295f..356c2d28b6b6e62ee09aedd119a81084b21521b6 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -123,7 +123,7 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable, bool writecombine);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 52ab38db04c7ed0c95c460a65606fadbd4a68719..3b5a2e8e6e3b15e62de72693f94505788929c9d7 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -42,6 +42,7 @@ enum kvm_pgtable_prot {
 	KVM_PGTABLE_PROT_R			= BIT(2),
 
 	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
+	KVM_PGTABLE_PROT_DEVICE_VGA		= BIT(4),
 };
 
 #define PAGE_HYP		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 505bdd75b5411258be7d70951c25371abb4e1ab5..50ac48f865f6701610cfb8bd0f44e83316d56f7f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -143,6 +143,7 @@
  * Memory types for Stage-2 translation
  */
 #define MT_S2_NORMAL		0xf
+#define MT_S2_NORMAL_NC		0x5
 #define MT_S2_DEVICE_nGnRE	0x1
 
 /*
@@ -150,6 +151,7 @@
  * Stage-2 enforces Normal-WB and Device-nGnRE
  */
 #define MT_S2_FWB_NORMAL	6
+#define MT_S2_FWB_NORMAL_NC	5
 #define MT_S2_FWB_DEVICE_nGnRE	1
 
 #ifdef CONFIG_ARM64_4K_PAGES
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 4d99d07c610c8940515492c209eada3cae63564b..d298df1e23f6adaa1177853c45fd1b6c06f55d87 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -170,6 +170,38 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp)
 	smp_store_release(ptep, pte);
 }
 
+static bool stage2_pte_cacheable(kvm_pte_t pte)
+{
+	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
+	return memattr == PAGE_S2_MEMATTR(NORMAL);
+}
+
+static void stage2_flush_dcache(void *addr, u64 size)
+{
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+		return;
+
+	__flush_dcache_area(addr, size);
+}
+
+static bool stage2_pte_executable(kvm_pte_t pte)
+{
+	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
+}
+
+static void stage2_invalidate_icache(kvm_pfn_t pfn, unsigned long size)
+{
+	if (icache_is_aliasing()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+		void *va = page_address(pfn_to_page(pfn));
+
+		invalidate_icache_range((unsigned long)va, (unsigned long)va + size);
+	}
+}
+
 static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
 				   u32 level)
 {
@@ -185,6 +217,13 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
 	if (kvm_pte_valid(old))
 		return old == pte;
 
+	/* Flush data cache before installation of the new PTE */
+	if (stage2_pte_cacheable(pte))
+		stage2_flush_dcache(kvm_pte_follow(pte), kvm_granule_size(level));
+
+	if (stage2_pte_executable(pte))
+		stage2_invalidate_icache(__phys_to_pfn(pa), kvm_granule_size(level));
+
 	smp_store_release(ptep, pte);
 	return true;
 }
@@ -440,9 +479,15 @@ struct stage2_map_data {
 static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
 				    struct stage2_map_data *data)
 {
-	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
-	kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) :
-			    PAGE_S2_MEMATTR(NORMAL);
+	bool device = (prot & KVM_PGTABLE_PROT_DEVICE_VGA) || (prot & KVM_PGTABLE_PROT_DEVICE);
+	kvm_pte_t attr;
+
+	if (prot & KVM_PGTABLE_PROT_DEVICE_VGA)
+		attr = PAGE_S2_MEMATTR(NORMAL_NC);
+	else if (prot & KVM_PGTABLE_PROT_DEVICE)
+		attr = PAGE_S2_MEMATTR(DEVICE_nGnRE);
+	else
+		attr = PAGE_S2_MEMATTR(NORMAL);
 	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
 
 	if (!(prot & KVM_PGTABLE_PROT_X))
@@ -641,20 +686,6 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	return ret;
 }
 
-static void stage2_flush_dcache(void *addr, u64 size)
-{
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
-		return;
-
-	__flush_dcache_area(addr, size);
-}
-
-static bool stage2_pte_cacheable(kvm_pte_t pte)
-{
-	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
-	return memattr == PAGE_S2_MEMATTR(NORMAL);
-}
-
 static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 			       enum kvm_pgtable_walk_flags flag,
 			       void * const arg)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 26068456ec0f3b56b5ccf703d166223389f33c6c..09feab947a2a2d7b676eb6f6f4b96212490b3163 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -487,6 +487,13 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	}
 }
 
+static inline bool is_vma_write_combine(struct vm_area_struct *vma)
+{
+	pteval_t pteval = pgprot_val(vma->vm_page_prot);
+
+	return ((pteval & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC));
+}
+
 /**
  * kvm_phys_addr_ioremap - map a device range to guest IPA
  *
@@ -497,13 +504,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
  * @writable:   Whether or not to create a writable mapping
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable, bool writecombine)
 {
 	phys_addr_t addr;
 	int ret = 0;
 	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
 	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
-	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
+	enum kvm_pgtable_prot prot =
+		(writecombine ? KVM_PGTABLE_PROT_DEVICE_VGA : KVM_PGTABLE_PROT_DEVICE) |
 				     KVM_PGTABLE_PROT_R |
 				     (writable ? KVM_PGTABLE_PROT_W : 0);
 
@@ -609,16 +617,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__invalidate_icache_guest_page(pfn, size);
-}
-
 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -885,12 +883,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		mark_page_dirty(kvm, gfn);
 	}
 
-	if (fault_status != FSC_PERM && !device)
-		clean_dcache_guest_page(pfn, vma_pagesize);
-
 	if (exec_fault) {
 		prot |= KVM_PGTABLE_PROT_X;
-		invalidate_icache_guest_page(pfn, vma_pagesize);
 	}
 
 	if (device)
@@ -1142,9 +1136,9 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 
 	/*
 	 * We've moved a page around, probably through CoW, so let's treat it
-	 * just like a translation fault and clean the cache to the PoC.
+	 * just like a translation fault and the map handler will clean the
+	 * cache to the PoC.
 	 */
-	clean_dcache_guest_page(pfn, PAGE_SIZE);
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
 	return 0;
 }
@@ -1354,7 +1348,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, is_vma_write_combine(vma));
 			if (ret)
 				break;
 		}
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e129630fef87b018021eb5f393baca16a..5d6492fa19baeed530346a4b6afba9c76e319b5e 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -341,7 +341,7 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true, false);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 57ae8b46b836156607e7514e44664cfbb7f5e231..40ce1ff36679fe08bd5194bb7312c7327671ba68 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1718,7 +1718,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 	}
 
 	vma->vm_private_data = vdev;
+#ifdef CONFIG_ARM64
+	if (vfio_pci_is_vga(pdev))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#else
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
 	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
 
 	/*