diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 660b14ce13179d4d5c0e7220490bf38ec735340e..12c120e436a2444facf0e300ec142f28f696e5b0 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -241,8 +241,10 @@ pmd_page_vaddr(pmd_t pmd) #define pud_page(pud) (mem_map + ((pud_val(pud) & _PFN_MASK) >> 32)) #endif -extern inline unsigned long pud_page_vaddr(pud_t pgd) -{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } +extern inline pmd_t *pud_pgtable(pud_t pgd) +{ + return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT))); +} extern inline int pte_none(pte_t pte) { return !pte_val(pte); } extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } @@ -292,7 +294,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu /* Find an entry in the second-level page table.. */ extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address) { - pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); + pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); smp_rmb(); /* see above */ return ret; } diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index d4edab51a77c075d3914e3c18d9ba1a578bec19f..eabe72ff7381522c6725a3e51bcf50d30362f6ee 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -130,7 +130,7 @@ flush_pmd_entry(pudp); \ } while (0) -static inline pmd_t *pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); } diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3f74db7b0a31dcbeac8fd02f5eb7d41fb8732320..3635d48ada17de4474301029413bbd5dada93670 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -633,9 +633,9 @@ static inline phys_addr_t pud_page_paddr(pud_t pud) return __pud_to_phys(pud); } -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { - return (unsigned long)__va(pud_page_paddr(pud)); + return (pmd_t *)__va(pud_page_paddr(pud)); } /* Find an entry in the second-level page table. 
*/ diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9f64fdfbf275056f1da315d5614cec2af6267364..fd92792d148b463e595acff832078c4f1b3096e9 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -279,7 +279,7 @@ extern unsigned long VMALLOC_END; #define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud))) #define pud_present(pud) (pud_val(pud) != 0UL) #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK)) +#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & _PFN_MASK)) #define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET)) #if CONFIG_PGTABLE_LEVELS == 4 diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 6e591514d7e3cfbfbd38ec230702f9305dfbd5b4..34eff7c52582d8a89bbe5bdfb320c9536836c0a1 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -130,7 +130,6 @@ config LOONGARCH select TRACE_IRQFLAGS_SUPPORT select USER_STACKTRACE_SUPPORT select ZONE_DMA32 - select ARCH_HAS_RELATIVE_EXTABLE config 32BIT bool diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index c82e52606c1d3ea4ec88b5e25b6b07c958a28602..20a01c628704b759d5548e409408a6096698b157 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -51,9 +51,6 @@ CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m CONFIG_VIRTUALIZATION=y CONFIG_KVM=m -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y @@ -71,13 +68,27 @@ CONFIG_ZBUD=y CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=m CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_NET_KEY=y +CONFIG_PACKET=m +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=m +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y @@ -86,15 +97,61 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m CONFIG_INET_ESP=m -CONFIG_INET_UDP_DIAG=y +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y 
CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m @@ -124,6 +181,7 @@ CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_NETDEV=y CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m CONFIG_NFT_COUNTER=m CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m @@ -139,12 +197,16 @@ CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -177,7 +239,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -209,15 +270,49 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y CONFIG_IP_VS_RR=m -CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m @@ -242,7 +337,8 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m -CONFIG_IP6_NF_IPTABLES=y +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -253,7 +349,8 @@ CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m @@ -262,53 +359,198 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_DECNET_NF_GRABULATOR=m CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_NF_CONNTRACK_BRIDGE=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m CONFIG_BRIDGE_EBT_ARP=m CONFIG_BRIDGE_EBT_IP=m 
CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y CONFIG_IP_SCTP=m -CONFIG_RDS=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_CGROUP=m CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_FLOWER=m CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m 
CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y CONFIG_DCB=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_BATMAN_ADV_SYSFS=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m -CONFIG_NETLINK_DIAG=y +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m CONFIG_BT=m CONFIG_BT_HCIBTUSB=m # CONFIG_BT_HCIBTUSB_BCM is not set @@ -434,7 +676,7 @@ CONFIG_WIREGUARD=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m -CONFIG_VXLAN=y +CONFIG_VXLAN=m CONFIG_RIONET=m CONFIG_TUN=m CONFIG_VETH=m @@ -800,7 +1042,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 3e0c3e4e57aeafeef53cdcd8e2f53ca531721b1e..431c791df515fc795a9e55cc24a828bd1b6bd020 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -5,7 +5,6 @@ generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h generic-y += qrwlock.h -generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += user.h diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index fed68810a4991f24f19def87b1dbab9e54fe81ea..0974f961163ed73362503902d2bed66a4d0c1d0f 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -156,30 +156,30 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) if (__builtin_constant_p(i)) { __asm__ __volatile__( - "1: ll.w %1, %2 # atomic_sub_if_positive\n" - " addi.w %0, %1, %3 \n" - " move %1, %0 \n" - " bltz %0, 2f \n" - " sc.w %1, %2 \n" - " beqz %1, 1b \n" - " b 3f " - "2: \n" + "1: ll.w %1, %2\n" + " addi.w %0, %1, %3\n" + " move %1, %0\n" + " bltz %0, 2f\n" + " sc.w %1, %2\n" + " beqz %1, 1b\n" + " b 3f\n" + "2:\n" __WEAK_LLSC_MB - "3: " + "3:\n" : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "I" (-i)); } else { __asm__ __volatile__( - "1: ll.w %1, %2 # atomic_sub_if_positive\n" - " sub.w %0, %1, %3 \n" - " move %1, %0 \n" - " bltz %0, 2f \n" - " sc.w %1, %2 \n" - " beqz %1, 1b \n" - " b 3f " - "2: \n" + "1: ll.w %1, %2\n" + " sub.w %0, %1, %3\n" + " move %1, %0\n" + " bltz %0, 2f\n" + " sc.w %1, %2\n" + " beqz %1, 1b\n" + " b 3f\n" + "2:\n" __WEAK_LLSC_MB - "3: " + "3:\n" : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "r" (i)); } @@ -321,30 +321,30 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v) if (__builtin_constant_p(i)) { __asm__ __volatile__( - "1: ll.d %1, %2 # atomic64_sub_if_positive \n" - " addi.d %0, %1, %3 \n" - " move %1, %0 \n" - " bltz %0, 2f \n" - " sc.d %1, %2 \n" - " beqz %1, 1b \n" - " b 3f " - "2: \n" + "1: ll.d %1, %2\n" + " addi.d %0, %1, %3\n" + " move %1, %0\n" + " bltz %0, 2f\n" + " sc.d %1, %2\n" + " beqz %1, 1b\n" + " b 3f\n" + "2:\n" __WEAK_LLSC_MB - "3: " + "3:\n" : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "I" (-i)); } else { __asm__ __volatile__( - "1: ll.d %1, %2 # atomic64_sub_if_positive \n" - " sub.d 
%0, %1, %3 \n" - " move %1, %0 \n" - " bltz %0, 2f \n" - " sc.d %1, %2 \n" - " beqz %1, 1b \n" - " b 3f " - "2: \n" + "1: ll.d %1, %2\n" + " sub.d %0, %1, %3\n" + " move %1, %0\n" + " bltz %0, 2f\n" + " sc.d %1, %2\n" + " beqz %1, 1b\n" + " b 3f\n" + "2:\n" __WEAK_LLSC_MB - "3: " + "3:\n" : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "r" (i)); } diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h index c1256e02a2bb0d99a5e72014480461410468bb0e..4b663f19770611b7cb5d1d0aa88a4a4de1d9b48a 100644 --- a/arch/loongarch/include/asm/barrier.h +++ b/arch/loongarch/include/asm/barrier.h @@ -5,28 +5,56 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H -#define __sync() __asm__ __volatile__("dbar 0" : : : "memory") +/* + * Hint encoding: + * + * Bit4: ordering or completion (0: completion, 1: ordering) + * Bit3: barrier for previous read (0: true, 1: false) + * Bit2: barrier for previous write (0: true, 1: false) + * Bit1: barrier for succeeding read (0: true, 1: false) + * Bit0: barrier for succeeding write (0: true, 1: false) + * + * Hint 0x700: barrier for "read after read" from the same address + */ + +#define DBAR(hint) __asm__ __volatile__("dbar %0 " : : "I"(hint) : "memory") + +#define crwrw 0b00000 +#define cr_r_ 0b00101 +#define c_w_w 0b01010 -#define fast_wmb() __sync() -#define fast_rmb() __sync() -#define fast_mb() __sync() -#define fast_iob() __sync() -#define wbflush() __sync() +#define orwrw 0b10000 +#define or_r_ 0b10101 +#define o_w_w 0b11010 -#define wmb() fast_wmb() -#define rmb() fast_rmb() -#define mb() fast_mb() -#define iob() fast_iob() +#define orw_w 0b10010 +#define or_rw 0b10100 -/*Memory barrier for multiprocessors*/ -#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory") -#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory") -#define __smp_wmb() (__asm__ __volatile__("dbar 0" : : : "memory")) +#define c_sync() DBAR(crwrw) +#define c_rsync() DBAR(cr_r_) +#define c_wsync() DBAR(c_w_w) + +#define o_sync() DBAR(orwrw) +#define o_rsync() DBAR(or_r_) +#define o_wsync() DBAR(o_w_w) + +#define ldacq_mb() DBAR(or_rw) +#define strel_mb() DBAR(orw_w) + +#define mb() c_sync() +#define rmb() c_rsync() +#define wmb() c_wsync() +#define iob() c_sync() +#define wbflush() c_sync() + +#define __smp_mb() o_sync() +#define __smp_rmb() o_rsync() +#define __smp_wmb() o_wsync() #ifdef CONFIG_SMP -#define __WEAK_LLSC_MB " dbar 0 \n" +#define __WEAK_LLSC_MB " dbar 0x700 \n" #else -#define __WEAK_LLSC_MB " \n" +#define __WEAK_LLSC_MB " \n" #endif #define __smp_mb__before_atomic() barrier() @@ -60,68 +88,19 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, return mask; } -#define __smp_load_acquire(p) \ -({ \ - union { typeof(*p) __val; char __c[1]; } __u; \ - unsigned long __tmp = 0; \ - compiletime_assert_atomic_type(*p); \ - switch (sizeof(*p)) { \ - case 1: \ - *(__u8 *)__u.__c = *(volatile __u8 *)p; \ - __smp_mb(); \ - break; \ - case 2: \ - *(__u16 *)__u.__c = *(volatile __u16 *)p; \ - __smp_mb(); \ - break; \ - case 4: \ - __asm__ __volatile__( \ - "amor_db.w %[val], %[tmp], %[mem] \n" \ - : [val] "=&r" (*(__u32 *)__u.__c) \ - : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \ - : "memory"); \ - break; \ - case 8: \ - __asm__ __volatile__( \ - "amor_db.d %[val], %[tmp], %[mem] \n" \ - : [val] "=&r" (*(__u64 *)__u.__c) \ - : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \ - : "memory"); \ - break; \ - } \ - (typeof(*p))__u.__val; \ +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + 
compiletime_assert_atomic_type(*p); \ + ldacq_mb(); \ + ___p1; \ }) -#define __smp_store_release(p, v) \ -do { \ - union { typeof(*p) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(*p)) (v) }; \ - unsigned long __tmp; \ - compiletime_assert_atomic_type(*p); \ - switch (sizeof(*p)) { \ - case 1: \ - __smp_mb(); \ - *(volatile __u8 *)p = *(__u8 *)__u.__c; \ - break; \ - case 2: \ - __smp_mb(); \ - *(volatile __u16 *)p = *(__u16 *)__u.__c; \ - break; \ - case 4: \ - __asm__ __volatile__( \ - "amswap_db.w %[tmp], %[val], %[mem] \n" \ - : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \ - : [val] "r" (*(__u32 *)__u.__c) \ - : ); \ - break; \ - case 8: \ - __asm__ __volatile__( \ - "amswap_db.d %[tmp], %[val], %[mem] \n" \ - : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \ - : [val] "r" (*(__u64 *)__u.__c) \ - : ); \ - break; \ - } \ +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + strel_mb(); \ + WRITE_ONCE(*p, v); \ } while (0) #define __smp_store_mb(p, v) \ diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h index c2b3472d49ec73e89995f4033b9954ca7c578a8d..b3eb2533e9348fab6d033bdf1b5b9ef07627ff5f 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -102,10 +102,10 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, " move $t0, %z4 \n" \ " " st " $t0, %1 \n" \ " beqz $t0, 1b \n" \ - " b 3f " \ + " b 3f\n" \ "2: \n" \ __WEAK_LLSC_MB \ - "3: " \ + "3:\n" \ : "=&r" (__ret), "=ZB"(*m) \ : "ZB"(*m), "Jr" (old), "Jr" (new) \ : "t0", "memory"); \ diff --git a/arch/loongarch/include/asm/extable.h b/arch/loongarch/include/asm/extable.h index 339122c9d614e36683ecb8aa78fe140914417827..6499c9f0640e0c47aadc8d8520bc6c77eac908da 100644 --- a/arch/loongarch/include/asm/extable.h +++ b/arch/loongarch/include/asm/extable.h @@ -15,6 +15,8 @@ * on our cache or tlb entries. 
*/ +#define ARCH_HAS_RELATIVE_EXTABLE + struct exception_table_entry { int insn, fixup; short type, data; diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h index f0e1a3e63f79ef23920c62f27351fbb1fbe6746b..8c619d71d2adcec2ac26555353e750bcdc15be58 100644 --- a/arch/loongarch/include/asm/futex.h +++ b/arch/loongarch/include/asm/futex.h @@ -78,10 +78,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv " move $t0, %z5 \n" "2: sc.w $t0, %2 \n" " beqz $t0, 1b \n" - " b 5f " + " b 5f\n" "3: \n" __WEAK_LLSC_MB - "5: " + "5:\n" _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) : "+r" (ret), "=&r" (val), "=ZC" (*uaddr) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 803bdeea8823879a4afa14866460ccd7d22415a5..8626f9da0f36acaea7e1da06975618763e979ca1 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -183,6 +183,7 @@ union loongarch_instruction { struct reg3_format reg3_format; struct reg2_format reg2_format; struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; struct reg2csr_format reg2csr_format; struct reg0i15_format reg0i15_format; diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 5207da7c2a822f5fa50a5865de79b91aa186ba95..593641b79e71355166c4509632cd54b0c969dc74 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -60,7 +60,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, #define ioremap_cache(offset, size) \ ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) -#define mmiowb() asm volatile ("dbar 0" ::: "memory") +#define mmiowb() wmb() /* * String version of I/O memory access operations. 
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index f3e3ba1cc843177ee45f187fa56c28c5ebbda875..31be19474288cea85f9938740f8d82467b658716 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,6 +53,7 @@ struct acpi_vector_group { extern struct acpi_vector_group pch_group[MAX_IO_PICS]; extern struct acpi_vector_group msi_group[MAX_IO_PICS]; +#define MAX_CORES_PER_EIO_NODE 256 #define CORES_PER_EIO_NODE 4 #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index e82119198ce3c56848182d2fc51dc53e0d9ed114..7ee205d694499cc7cdd1fc19fc8b693f58994f76 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -275,6 +275,11 @@ static __always_inline u64 iocsr_read64(u32 reg) return __iocsrrd_d(reg); } +static __always_inline void iocsr_write8(u8 val, u32 reg) +{ + __iocsrwr_b(val, reg); +} + static __always_inline void iocsr_write32(u32 val, u32 reg) { __iocsrwr_w(val, reg); diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h new file mode 100644 index 0000000000000000000000000000000000000000..5a447f06dbdc963203ee58e77cbb6f6c782a450a --- /dev/null +++ b/arch/loongarch/include/asm/paravirt.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_PARAVIRT_H +#define _ASM_LOONGARCH_PARAVIRT_H +#include + +#ifdef CONFIG_PARAVIRT +static inline bool kvm_para_available(void) +{ + return true; +} +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +struct pv_time_ops { + unsigned long long (*steal_clock)(int cpu); +}; +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u32 pad[12]; +}; +extern struct pv_time_ops pv_time_ops; + +static inline u64 paravirt_steal_clock(int cpu) +{ + return pv_time_ops.steal_clock(cpu); +} + +static inline bool pv_feature_support(int feature) +{ + return kvm_hypercall1(KVM_HC_FUNC_FEATURE, feature) == KVM_RET_SUC; +} +static inline void pv_notify_host(int feature, unsigned long data) +{ + kvm_hypercall2(KVM_HC_FUNC_NOTIFY, feature, data); +} + +int __init pv_time_init(void); +int __init pv_ipi_init(void); +#else +static inline bool kvm_para_available(void) +{ + return false; +} + +static inline int pv_time_init(void) +{ + return 0; +} + +static inline int pv_ipi_init(void) +{ + return 0; +} +#endif +#endif /* _ASM_LOONGARCH_PARAVIRT_H */ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index b56a476bc5480baae6b01cc5976ec031604fbbba..b2033e8b1d32691d399319e3fe9c16034b8bf7f6 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -298,10 +298,10 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) " or %[tmp], %[tmp], %[global] \n" __SC "%[tmp], %[buddy] \n" " beqz %[tmp], 1b \n" - " b 3f " + " b 3f\n" "2: \n" __WEAK_LLSC_MB - "3: " + "3:\n" : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [global] "r" (page_global)); #else /* !CONFIG_SMP */ diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h new file mode 100644 index 0000000000000000000000000000000000000000..34f43f8ad5912b657d86c96280ca1e6f104f81c9 --- /dev/null +++ b/arch/loongarch/include/asm/qspinlock.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_QSPINLOCK_H +#define _ASM_QSPINLOCK_H + +#include + +#define 
queued_spin_unlock queued_spin_unlock + +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + compiletime_assert_atomic_type(lock->locked); + c_sync(); + WRITE_ONCE(lock->locked, 0); +} + +#include + +#endif /* _ASM_QSPINLOCK_H */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 9ba3567d268bfb77ac96c860f027121c2fb2ca30..177dda325ef42ddde2e42c2a3a880b1c6b227d34 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -28,6 +28,7 @@ #include #include "legacy_boot.h" +static unsigned long screen_info_table __initdata = EFI_INVALID_TABLE_ADDR; static unsigned long new_memmap __initdata = EFI_INVALID_TABLE_ADDR; static unsigned long initrd __initdata = EFI_INVALID_TABLE_ADDR; @@ -36,12 +37,32 @@ static unsigned long efi_config_table; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { + {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table, NULL}, {LINUX_EFI_NEW_MEMMAP_GUID, &new_memmap, "NEWMEM"}, {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD"}, {}, }; static __initdata pgd_t *pgd_efi; +static void __init init_screen_info(void) +{ + struct screen_info *si; + + if (screen_info_table != EFI_INVALID_TABLE_ADDR) { + si = early_memremap_ro(screen_info_table, sizeof(*si)); + if (!si) { + pr_err("Could not map screen_info config table\n"); + return; + } + screen_info = *si; + memset(si, 0, sizeof(*si)); + early_memunmap(si, sizeof(*si)); + } + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) + memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); +} + static int __init efimap_populate_hugepages( unsigned long start, unsigned long end, pgprot_t prot) @@ -293,6 +314,5 @@ void __init loongson_efi_init(void) init_new_memmap(); - if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) - memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); + init_screen_info(); } diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index f55910ab85038864808326db9846f8b2f6a39e06..41e6d20037539a9460be4ac72f4fb7da91d20e5b 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -106,6 +106,14 @@ void __init init_IRQ(void) unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; + u64 node; + + if (!acpi_gbl_reduced_hardware) + for_each_node(node) + writel(0x40000000 | (node << 12), + (void __iomem *)(((node << 44) + | 0x80000EFDFB000000ULL) + 0x274)); + clear_csr_ecfg(ECFG0_IM); clear_csr_estat(ESTATF_IP); diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index 9c2aaf6317c6589527a0dbb52bc58643a826ac9a..5b750d946fa978fd5c9ff18d46ce691754631e49 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -113,13 +113,15 @@ static int bad_pch_pic(unsigned long address) void register_default_pic(int id, u32 address, u32 irq_base) { - int idx, entries; + int j, idx, entries, cores; unsigned long addr; + u64 node_map = 0; if (bad_pch_pic(address)) return; idx = nr_io_pics; + cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); pchpic_default[idx].address = address; if (idx) @@ -138,14 +140,29 @@ void register_default_pic(int id, u32 address, u32 irq_base) pchmsi_default[idx].start = entries; pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; - eiointc_default[idx].cascade = 3; + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + node_map |= (1 << node); + } + eiointc_default[idx].cascade = 3 + idx; eiointc_default[idx].node = id; - eiointc_default[idx].node_map = 1; + eiointc_default[idx].node_map = node_map; if (idx) { - eiointc_default[idx].cascade = 0x4; - eiointc_default[0].node_map = 0x1DF; - eiointc_default[idx].node_map = 0xFE20; + int i; + + for (i = 0; i < idx + 1; i++) { + node_map = 0; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + if (((node & 7) < 4) ? !i : i) + node_map |= (1 << node); + } + eiointc_default[i].node_map = node_map; + } } acpi_pchpic[idx] = &pchpic_default[idx]; @@ -204,7 +221,7 @@ static int acpi_parse_madt_pch_pic_entries(void) return 0; } -int legacy_madt_table_init(void) +int __init legacy_madt_table_init(void) { int error; @@ -278,7 +295,7 @@ int setup_legacy_IRQ(void) pr_info("Pic domain error!\n"); return -1; } - if (pic_domain) + if (pic_domain && !cpu_has_hypervisor) pch_lpc_acpi_init(pic_domain, acpi_pchlpc); return 0; @@ -387,7 +404,7 @@ static unsigned long init_initrd(void) } #endif -void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +void __init fw_init_cmdline(unsigned long argc, unsigned long cmdp) { int i; char **_fw_argv; @@ -491,16 +508,6 @@ unsigned int bpi_init(void) return list_find(efi_bp); } -static void register_addrs_set(u64 *registers, const u64 addr, int num) -{ - u64 i; - - for (i = 0; i < num; i++) { - *registers = (i << 44) | addr; - registers++; - } -} - static int get_bpi_version(u64 *signature) { u8 data[9]; @@ -521,7 +528,7 @@ static void __init parse_bpi_flags(void) clear_bit(EFI_BOOT, &efi.flags); } -unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) +unsigned long __init legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) { int ret; @@ -530,9 +537,12 @@ unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigne efi_bp = (struct boot_params *)bpi; bpi_version = get_bpi_version(&efi_bp->signature); pr_info("BPI%d with boot flags %llx.\n", bpi_version, efi_bp->flags); - if (bpi_version == BPI_VERSION_NONE) - panic("Fatal error, bpi ver BONE!\n"); - else if (bpi_version == BPI_VERSION_V2) + if (bpi_version == BPI_VERSION_NONE) { + if (cpu_has_hypervisor) + pr_err("Fatal error, bpi ver BONE!\n"); + else + panic("Fatal error, bpi ver BONE!\n"); + } else if (bpi_version == BPI_VERSION_V2) parse_bpi_flags(); fw_init_cmdline(argc, cmdptr); diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h index 7b7ec8a013c5ef4a1c8eeca3f6d4c6d509baecb7..c3ba036020d8e5b931b9e2472576b6d06c99e309 100644 --- a/arch/loongarch/kernel/legacy_boot.h +++ b/arch/loongarch/kernel/legacy_boot.h @@ -66,7 +66,7 @@ struct loongsonlist_screeninfo { struct _extension_list_hdr header; struct screen_info si; }; -unsigned long legacy_boot_init(unsigned long argc, +unsigned long __init legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi); extern int bpi_version; extern struct boot_params *efi_bp; @@ -75,7 +75,7 @@ extern int set_processor_mask(u32 id, u32 flags); extern int __init setup_legacy_IRQ(void); extern struct 
loongson_system_configuration loongson_sysconf; extern unsigned long long smp_group[MAX_PACKAGES]; -extern int legacy_madt_table_init(void); +extern int __init legacy_madt_table_init(void); extern struct pch_pic *pch_pic_priv[MAX_IO_PICS]; extern struct irq_domain *get_cpudomain(void); extern int __init cpuintc_acpi_init(union acpi_subtable_headers *header, diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index 5bdb1fe8e0586c805b83878855b2ad6ba1c57d9e..f1d95ee107192ddf32b4670f28d9f01f71152ab1 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -14,7 +15,7 @@ void __init memblock_init(void) { u32 i, mem_type; u64 mem_start, mem_end, mem_size; - efi_memory_desc_t *md; + if (g_mmap) { /* parse memory information */ for (i = 0; i < g_mmap->map_count; i++) { @@ -25,8 +26,8 @@ void __init memblock_init(void) switch (mem_type) { case ADDRESS_TYPE_SYSRAM: - pr_info("add memory region memblock - base: - %lx size: %x\n", mem_start, mem_size); + pr_info("add memory region memblock - base:%lx size: %x\n", + mem_start, mem_size); memblock_add(mem_start, mem_size); if (max_low_pfn < (mem_end >> PAGE_SHIFT)) max_low_pfn = mem_end >> PAGE_SHIFT; @@ -39,6 +40,9 @@ void __init memblock_init(void) __pa_symbol(&_end) - __pa_symbol(&_text)); return; } + + efi_memory_desc_t *md; + /* Parse memory information */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index bee4194177fdda50814491d94470b8d92e5333fd..0738f7562782bf81165a8f47eac078fffc33f1f6 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 4b7b42cf19bdcf89c7ad8a8cf8d05b319cce774e..493f67763d841844b4d6ba6b49b9f40eca372645 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -119,7 +119,7 @@ static u32 ipi_read_clear(int cpu) action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS); /* Clear the ipi register to clear the interrupt */ iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR); - smp_mb(); + wbflush(); return action; } diff --git a/arch/loongarch/kvm/csr.c b/arch/loongarch/kvm/csr.c index ba9ac3c030f5814f714a5dfec383af2676d2b811..f01a83f816a075bf45a0fb8c2bb141cf3946d3a9 100644 --- a/arch/loongarch/kvm/csr.c +++ b/arch/loongarch/kvm/csr.c @@ -9,37 +9,48 @@ #include "kvmcpu.h" #include "intc/ls3a_ipi.h" #include "intc/ls3a_ext_irq.h" -#include "ls_irq.h" #include "kvm_compat.h" #include "kvmcsr.h" #include "irq.h" -static inline int CASE_READ_SW_GCSR(csr, regid, csrid) -{ - if (regid == csrid) - return kvm_read_sw_gcsr(csr, csrid); -} - unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) { struct loongarch_csrs *csr = vcpu->arch.csr; unsigned long val = 0; - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRERA); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE); + switch (csrid) { + case KVM_CSR_ERRCTL: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRCTL); + case KVM_CSR_ERRINFO1: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO1); + case KVM_CSR_ERRINFO2: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO2); + case KVM_CSR_MERRENTRY: + return 
kvm_read_sw_gcsr(csr, KVM_CSR_MERRENTRY); + case KVM_CSR_MERRERA: + return kvm_read_sw_gcsr(csr, KVM_CSR_MERRERA); + case KVM_CSR_ERRSAVE: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRSAVE); /* read sw csr when not config pmu to guest */ - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL0); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL3); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3); + case KVM_CSR_PERFCTRL0: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL0); + case KVM_CSR_PERFCTRL1: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL1); + case KVM_CSR_PERFCTRL2: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL2); + case KVM_CSR_PERFCTRL3: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL3); + case KVM_CSR_PERFCNTR0: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR0); + case KVM_CSR_PERFCNTR1: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR1); + case KVM_CSR_PERFCNTR2: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR2); + case KVM_CSR_PERFCNTR3: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR3); + default: + break; + } val = 0; if (csrid < 4096) @@ -50,37 +61,83 @@ unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) return val; } -static inline int CASE_WRITE_SW_GCSR(csr, regid, csrid, val) -{ - if (regid == csrid) { - kvm_write_sw_gcsr(csr, csrid, val); - return; - } -} - void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) { struct loongarch_csrs *csr = vcpu->arch.csr; - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE, val); + switch (csrid) { + case KVM_CSR_ERRCTL: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRCTL, val); + case KVM_CSR_ERRINFO1: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO1, val); + case KVM_CSR_ERRINFO2: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO2, val); + case KVM_CSR_MERRENTRY: + return kvm_write_sw_gcsr(csr, KVM_CSR_MERRENTRY, val); + case KVM_CSR_MERRERA: + return kvm_write_sw_gcsr(csr, KVM_CSR_MERRERA, val); + case KVM_CSR_ERRSAVE: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRSAVE, val); + default: + break; + } /* give pmu register to guest when config perfctrl */ - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL0, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL1, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL2, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL3, val); - /* write sw pmu csr if not config ctrl */ - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3, val); + switch (csrid) { + case KVM_CSR_PERFCTRL0: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL0, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL0, val); + } + return; + case KVM_CSR_PERFCTRL1: + if (val & KVM_PMU_PLV_ENABLE) { + 
kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL1, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL1, val); + } + return; + case KVM_CSR_PERFCTRL2: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL2, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL2, val); + } + return; + case KVM_CSR_PERFCTRL3: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL3, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL3, val); + } + return; + default: + break; + } + /* write sw pmu csr if not config ctrl */ + switch (csrid) { + case KVM_CSR_PERFCNTR0: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR0, val); + case KVM_CSR_PERFCNTR1: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR1, val); + case KVM_CSR_PERFCNTR2: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR2, val); + case KVM_CSR_PERFCNTR3: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR3, val); + default: + break; + } if (csrid < 4096) kvm_write_sw_gcsr(csr, csrid, val); @@ -89,26 +146,29 @@ void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, csrid, vcpu->arch.pc); } -static inline int CASE_CHANGE_SW_GCSR(csr, regid, csrid, mask, val) -{ - if (regid == csrid) { - kvm_change_sw_gcsr(csr, csrid, mask, val); - return; - } -} - void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long csr_mask, unsigned long val) { struct loongarch_csrs *csr = vcpu->arch.csr; - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_IMPCTL1, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE, csr_mask, val); + switch (csrid) { + case KVM_CSR_IMPCTL1: + return kvm_change_sw_gcsr(csr, KVM_CSR_IMPCTL1, csr_mask, val); + case KVM_CSR_ERRCTL: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRCTL, csr_mask, val); + case KVM_CSR_ERRINFO1: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRINFO1, csr_mask, val); + case KVM_CSR_ERRINFO2: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRINFO2, csr_mask, val); + case KVM_CSR_MERRENTRY: + return kvm_change_sw_gcsr(csr, KVM_CSR_MERRENTRY, csr_mask, val); + case KVM_CSR_MERRERA: + return kvm_change_sw_gcsr(csr, KVM_CSR_MERRERA, csr_mask, val); + case KVM_CSR_ERRSAVE: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRSAVE, csr_mask, val); + default: + break; + } if (csrid < 4096) { unsigned long orig; @@ -126,73 +186,208 @@ int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force) { struct loongarch_csrs *csr = vcpu->arch.csr; - GET_HW_GCSR(id, KVM_CSR_CRMD, v); - GET_HW_GCSR(id, KVM_CSR_PRMD, v); - GET_HW_GCSR(id, KVM_CSR_EUEN, v); - GET_HW_GCSR(id, KVM_CSR_MISC, v); - GET_HW_GCSR(id, KVM_CSR_ECFG, v); - GET_HW_GCSR(id, KVM_CSR_ESTAT, v); - GET_HW_GCSR(id, KVM_CSR_ERA, v); - GET_HW_GCSR(id, KVM_CSR_BADV, v); - GET_HW_GCSR(id, KVM_CSR_BADI, v); - GET_HW_GCSR(id, KVM_CSR_EENTRY, v); - GET_HW_GCSR(id, KVM_CSR_TLBIDX, v); - GET_HW_GCSR(id, KVM_CSR_TLBEHI, v); - 
GET_HW_GCSR(id, KVM_CSR_TLBELO0, v); - GET_HW_GCSR(id, KVM_CSR_TLBELO1, v); - GET_HW_GCSR(id, KVM_CSR_ASID, v); - GET_HW_GCSR(id, KVM_CSR_PGDL, v); - GET_HW_GCSR(id, KVM_CSR_PGDH, v); - GET_HW_GCSR(id, KVM_CSR_PWCTL0, v); - GET_HW_GCSR(id, KVM_CSR_PWCTL1, v); - GET_HW_GCSR(id, KVM_CSR_STLBPGSIZE, v); - GET_HW_GCSR(id, KVM_CSR_RVACFG, v); - GET_HW_GCSR(id, KVM_CSR_CPUID, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG1, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG2, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG3, v); - GET_HW_GCSR(id, KVM_CSR_KS0, v); - GET_HW_GCSR(id, KVM_CSR_KS1, v); - GET_HW_GCSR(id, KVM_CSR_KS2, v); - GET_HW_GCSR(id, KVM_CSR_KS3, v); - GET_HW_GCSR(id, KVM_CSR_KS4, v); - GET_HW_GCSR(id, KVM_CSR_KS5, v); - GET_HW_GCSR(id, KVM_CSR_KS6, v); - GET_HW_GCSR(id, KVM_CSR_KS7, v); - GET_HW_GCSR(id, KVM_CSR_TMID, v); - GET_HW_GCSR(id, KVM_CSR_TCFG, v); - GET_HW_GCSR(id, KVM_CSR_TVAL, v); - GET_HW_GCSR(id, KVM_CSR_CNTC, v); - GET_HW_GCSR(id, KVM_CSR_LLBCTL, v); - GET_HW_GCSR(id, KVM_CSR_TLBRENTRY, v); - GET_HW_GCSR(id, KVM_CSR_TLBRBADV, v); - GET_HW_GCSR(id, KVM_CSR_TLBRERA, v); - GET_HW_GCSR(id, KVM_CSR_TLBRSAVE, v); - GET_HW_GCSR(id, KVM_CSR_TLBRELO0, v); - GET_HW_GCSR(id, KVM_CSR_TLBRELO1, v); - GET_HW_GCSR(id, KVM_CSR_TLBREHI, v); - GET_HW_GCSR(id, KVM_CSR_TLBRPRMD, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN0, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN1, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN2, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN3, v); - GET_HW_GCSR(id, KVM_CSR_MWPS, v); - GET_HW_GCSR(id, KVM_CSR_FWPS, v); - - GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v); - GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRCTL, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRINFO1, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRINFO2, v); - GET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v); - GET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRSAVE, v); - GET_SW_GCSR(csr, id, KVM_CSR_CTAG, v); - GET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v); - GET_SW_GCSR(csr, id, KVM_CSR_DERA, v); - GET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v); - - GET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v); + switch (id) { + case KVM_CSR_CRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CRMD); + return 0; + case KVM_CSR_PRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRMD); + return 0; + case KVM_CSR_EUEN: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_EUEN); + return 0; + case KVM_CSR_MISC: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_MISC); + return 0; + case KVM_CSR_ECFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ECFG); + return 0; + case KVM_CSR_ESTAT: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ESTAT); + return 0; + case KVM_CSR_ERA: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ERA); + return 0; + case KVM_CSR_BADV: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_BADV); + return 0; + case KVM_CSR_BADI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_BADI); + return 0; + case KVM_CSR_EENTRY: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_EENTRY); + return 0; + case KVM_CSR_TLBIDX: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBIDX); + return 0; + case KVM_CSR_TLBEHI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBEHI); + return 0; + case KVM_CSR_TLBELO0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBELO0); + return 0; + case KVM_CSR_TLBELO1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBELO1); + return 0; + case KVM_CSR_ASID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ASID); + return 0; + case KVM_CSR_PGDL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PGDL); + return 0; + case KVM_CSR_PGDH: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PGDH); + return 0; + case KVM_CSR_PWCTL0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PWCTL0); + return 0; + case KVM_CSR_PWCTL1: + *v = 
(long)kvm_read_hw_gcsr(KVM_CSR_PWCTL1); + return 0; + case KVM_CSR_STLBPGSIZE: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_STLBPGSIZE); + return 0; + case KVM_CSR_RVACFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_RVACFG); + return 0; + case KVM_CSR_CPUID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CPUID); + return 0; + case KVM_CSR_PRCFG1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRCFG1); + return 0; + case KVM_CSR_PRCFG2: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRCFG2); + return 0; + case KVM_CSR_PRCFG3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRCFG3); + return 0; + case KVM_CSR_KS0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS0); + return 0; + case KVM_CSR_KS1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS1); + return 0; + case KVM_CSR_KS2: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS2); + return 0; + case KVM_CSR_KS3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS3); + return 0; + case KVM_CSR_KS4: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS4); + return 0; + case KVM_CSR_KS5: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS5); + return 0; + case KVM_CSR_KS6: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS6); + return 0; + case KVM_CSR_KS7: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS7); + return 0; + case KVM_CSR_TMID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TMID); + return 0; + case KVM_CSR_TCFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TCFG); + return 0; + case KVM_CSR_TVAL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TVAL); + return 0; + case KVM_CSR_CNTC: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CNTC); + return 0; + case KVM_CSR_LLBCTL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_LLBCTL); + return 0; + case KVM_CSR_TLBRENTRY: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRENTRY); + return 0; + case KVM_CSR_TLBRBADV: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRBADV); + return 0; + case KVM_CSR_TLBRERA: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRERA); + return 0; + case KVM_CSR_TLBRSAVE: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRSAVE); + return 0; + case KVM_CSR_TLBRELO0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRELO0); + return 0; + case KVM_CSR_TLBRELO1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRELO1); + return 0; + case KVM_CSR_TLBREHI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBREHI); + return 0; + case KVM_CSR_TLBRPRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRPRMD); + return 0; + case KVM_CSR_DMWIN0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN0); + return 0; + case KVM_CSR_DMWIN1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN1); + return 0; + case KVM_CSR_DMWIN2: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN2); + return 0; + case KVM_CSR_DMWIN3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN3); + return 0; + case KVM_CSR_MWPS: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_MWPS); + return 0; + case KVM_CSR_FWPS: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_FWPS); + return 0; + default: + break; + } + + switch (id) { + case KVM_CSR_IMPCTL1: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_IMPCTL1); + return 0; + case KVM_CSR_IMPCTL2: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_IMPCTL2); + return 0; + case KVM_CSR_ERRCTL: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRCTL); + return 0; + case KVM_CSR_ERRINFO1: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO1); + return 0; + case KVM_CSR_ERRINFO2: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO2); + return 0; + case KVM_CSR_MERRENTRY: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_MERRENTRY); + return 0; + case KVM_CSR_MERRERA: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_MERRERA); + return 0; + case KVM_CSR_ERRSAVE: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRSAVE); + return 0; + case KVM_CSR_CTAG: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_CTAG); + return 0; + case KVM_CSR_DEBUG: + *v = 
kvm_read_sw_gcsr(csr, KVM_CSR_DEBUG); + return 0; + case KVM_CSR_DERA: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_DERA); + return 0; + case KVM_CSR_DESAVE: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_DESAVE); + return 0; + case KVM_CSR_TINTCLR: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_TINTCLR); + return 0; + } if (force && (id < CSR_ALL_SIZE)) { *v = kvm_read_sw_gcsr(csr, id); @@ -207,73 +402,222 @@ int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force) struct loongarch_csrs *csr = vcpu->arch.csr; int ret; - SET_HW_GCSR(csr, id, KVM_CSR_CRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_PRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_EUEN, v); - SET_HW_GCSR(csr, id, KVM_CSR_MISC, v); - SET_HW_GCSR(csr, id, KVM_CSR_ECFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_ERA, v); - SET_HW_GCSR(csr, id, KVM_CSR_BADV, v); - SET_HW_GCSR(csr, id, KVM_CSR_BADI, v); - SET_HW_GCSR(csr, id, KVM_CSR_EENTRY, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBIDX, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBEHI, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBELO0, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBELO1, v); - SET_HW_GCSR(csr, id, KVM_CSR_ASID, v); - SET_HW_GCSR(csr, id, KVM_CSR_PGDL, v); - SET_HW_GCSR(csr, id, KVM_CSR_PGDH, v); - SET_HW_GCSR(csr, id, KVM_CSR_PWCTL0, v); - SET_HW_GCSR(csr, id, KVM_CSR_PWCTL1, v); - SET_HW_GCSR(csr, id, KVM_CSR_STLBPGSIZE, v); - SET_HW_GCSR(csr, id, KVM_CSR_RVACFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_CPUID, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS0, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS1, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS2, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS3, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS4, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS5, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS6, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS7, v); - SET_HW_GCSR(csr, id, KVM_CSR_TMID, v); - SET_HW_GCSR(csr, id, KVM_CSR_TCFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_TVAL, v); - SET_HW_GCSR(csr, id, KVM_CSR_CNTC, v); - SET_HW_GCSR(csr, id, KVM_CSR_LLBCTL, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRENTRY, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRBADV, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRERA, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRSAVE, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO0, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO1, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBREHI, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRPRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN0, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN1, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN2, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN3, v); - SET_HW_GCSR(csr, id, KVM_CSR_MWPS, v); - SET_HW_GCSR(csr, id, KVM_CSR_FWPS, v); - - SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v); - SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRCTL, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRINFO1, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRINFO2, v); - SET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v); - SET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRSAVE, v); - SET_SW_GCSR(csr, id, KVM_CSR_CTAG, v); - SET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v); - SET_SW_GCSR(csr, id, KVM_CSR_DERA, v); - SET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG1, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG2, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG3, v); - - SET_SW_GCSR(csr, id, KVM_CSR_PGD, v); - SET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v); + switch (id) { + case KVM_CSR_CRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_CRMD, *v); + return 0; + case KVM_CSR_PRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_PRMD, *v); + return 0; + case KVM_CSR_EUEN: + kvm_write_hw_gcsr(csr, KVM_CSR_EUEN, *v); + return 0; + case KVM_CSR_MISC: + 
kvm_write_hw_gcsr(csr, KVM_CSR_MISC, *v); + return 0; + case KVM_CSR_ECFG: + kvm_write_hw_gcsr(csr, KVM_CSR_ECFG, *v); + return 0; + case KVM_CSR_ESTAT: + kvm_write_hw_gcsr(csr, KVM_CSR_ESTAT, *v); + return 0; + case KVM_CSR_ERA: + kvm_write_hw_gcsr(csr, KVM_CSR_ERA, *v); + return 0; + case KVM_CSR_BADV: + kvm_write_hw_gcsr(csr, KVM_CSR_BADV, *v); + return 0; + case KVM_CSR_BADI: + kvm_write_hw_gcsr(csr, KVM_CSR_BADI, *v); + return 0; + case KVM_CSR_EENTRY: + kvm_write_hw_gcsr(csr, KVM_CSR_EENTRY, *v); + return 0; + case KVM_CSR_TLBIDX: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBIDX, *v); + return 0; + case KVM_CSR_TLBEHI: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBEHI, *v); + return 0; + case KVM_CSR_TLBELO0: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBELO0, *v); + return 0; + case KVM_CSR_TLBELO1: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBELO1, *v); + return 0; + case KVM_CSR_ASID: + kvm_write_hw_gcsr(csr, KVM_CSR_ASID, *v); + return 0; + case KVM_CSR_PGDL: + kvm_write_hw_gcsr(csr, KVM_CSR_PGDL, *v); + return 0; + case KVM_CSR_PGDH: + kvm_write_hw_gcsr(csr, KVM_CSR_PGDH, *v); + return 0; + case KVM_CSR_PWCTL0: + kvm_write_hw_gcsr(csr, KVM_CSR_PWCTL0, *v); + return 0; + case KVM_CSR_PWCTL1: + kvm_write_hw_gcsr(csr, KVM_CSR_PWCTL1, *v); + return 0; + case KVM_CSR_STLBPGSIZE: + kvm_write_hw_gcsr(csr, KVM_CSR_STLBPGSIZE, *v); + return 0; + case KVM_CSR_RVACFG: + kvm_write_hw_gcsr(csr, KVM_CSR_RVACFG, *v); + return 0; + case KVM_CSR_CPUID: + kvm_write_hw_gcsr(csr, KVM_CSR_CPUID, *v); + return 0; + case KVM_CSR_PRCFG1: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG1, *v); + return 0; + case KVM_CSR_PRCFG2: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG2, *v); + return 0; + case KVM_CSR_PRCFG3: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG3, *v); + return 0; + case KVM_CSR_KS0: + kvm_write_hw_gcsr(csr, KVM_CSR_KS0, *v); + return 0; + case KVM_CSR_KS1: + kvm_write_hw_gcsr(csr, KVM_CSR_KS1, *v); + return 0; + case KVM_CSR_KS2: + kvm_write_hw_gcsr(csr, KVM_CSR_KS2, *v); + return 0; + case KVM_CSR_KS3: + kvm_write_hw_gcsr(csr, KVM_CSR_KS3, *v); + return 0; + case KVM_CSR_KS4: + kvm_write_hw_gcsr(csr, KVM_CSR_KS4, *v); + return 0; + case KVM_CSR_KS5: + kvm_write_hw_gcsr(csr, KVM_CSR_KS5, *v); + return 0; + case KVM_CSR_KS6: + kvm_write_hw_gcsr(csr, KVM_CSR_KS6, *v); + return 0; + case KVM_CSR_KS7: + kvm_write_hw_gcsr(csr, KVM_CSR_KS7, *v); + return 0; + case KVM_CSR_TMID: + kvm_write_hw_gcsr(csr, KVM_CSR_TMID, *v); + return 0; + case KVM_CSR_TCFG: + kvm_write_hw_gcsr(csr, KVM_CSR_TCFG, *v); + return 0; + case KVM_CSR_TVAL: + kvm_write_hw_gcsr(csr, KVM_CSR_TVAL, *v); + return 0; + case KVM_CSR_CNTC: + kvm_write_hw_gcsr(csr, KVM_CSR_CNTC, *v); + return 0; + case KVM_CSR_LLBCTL: + kvm_write_hw_gcsr(csr, KVM_CSR_LLBCTL, *v); + return 0; + case KVM_CSR_TLBRENTRY: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRENTRY, *v); + return 0; + case KVM_CSR_TLBRBADV: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRBADV, *v); + return 0; + case KVM_CSR_TLBRERA: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRERA, *v); + return 0; + case KVM_CSR_TLBRSAVE: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRSAVE, *v); + return 0; + case KVM_CSR_TLBRELO0: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRELO0, *v); + return 0; + case KVM_CSR_TLBRELO1: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRELO1, *v); + return 0; + case KVM_CSR_TLBREHI: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBREHI, *v); + return 0; + case KVM_CSR_TLBRPRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRPRMD, *v); + return 0; + case KVM_CSR_DMWIN0: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN0, *v); + return 0; + case KVM_CSR_DMWIN1: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN1, *v); +
return 0; + case KVM_CSR_DMWIN2: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN2, *v); + return 0; + case KVM_CSR_DMWIN3: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN3, *v); + return 0; + case KVM_CSR_MWPS: + kvm_write_hw_gcsr(csr, KVM_CSR_MWPS, *v); + return 0; + case KVM_CSR_FWPS: + kvm_write_hw_gcsr(csr, KVM_CSR_FWPS, *v); + return 0; + default: + break; + } + + switch (id) { + case KVM_CSR_IMPCTL1: + kvm_write_sw_gcsr(csr, KVM_CSR_IMPCTL1, *v); + return 0; + case KVM_CSR_IMPCTL2: + kvm_write_sw_gcsr(csr, KVM_CSR_IMPCTL2, *v); + return 0; + case KVM_CSR_ERRCTL: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRCTL, *v); + return 0; + case KVM_CSR_ERRINFO1: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO1, *v); + return 0; + case KVM_CSR_ERRINFO2: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO2, *v); + return 0; + case KVM_CSR_MERRENTRY: + kvm_write_sw_gcsr(csr, KVM_CSR_MERRENTRY, *v); + return 0; + case KVM_CSR_MERRERA: + kvm_write_sw_gcsr(csr, KVM_CSR_MERRERA, *v); + return 0; + case KVM_CSR_ERRSAVE: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRSAVE, *v); + return 0; + case KVM_CSR_CTAG: + kvm_write_sw_gcsr(csr, KVM_CSR_CTAG, *v); + return 0; + case KVM_CSR_DEBUG: + kvm_write_sw_gcsr(csr, KVM_CSR_DEBUG, *v); + return 0; + case KVM_CSR_DERA: + kvm_write_sw_gcsr(csr, KVM_CSR_DERA, *v); + return 0; + case KVM_CSR_DESAVE: + kvm_write_sw_gcsr(csr, KVM_CSR_DESAVE, *v); + return 0; + case KVM_CSR_PRCFG1: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG1, *v); + return 0; + case KVM_CSR_PRCFG2: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG2, *v); + return 0; + case KVM_CSR_PRCFG3: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG3, *v); + return 0; + case KVM_CSR_PGD: + kvm_write_sw_gcsr(csr, KVM_CSR_PGD, *v); + return 0; + case KVM_CSR_TINTCLR: + kvm_write_sw_gcsr(csr, KVM_CSR_TINTCLR, *v); + return 0; + default: + break; + } ret = -1; switch (id) { @@ -521,7 +865,7 @@ static int _kvm_emu_iocsr_write(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* all iocsr operation should in kvm, no mmio */ -int _kvm_emu_iocsr(larch_inst inst, +int _kvm_emu_iocsr(union loongarch_instruction inst, struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 rd, rj, opcode; diff --git a/arch/loongarch/kvm/emulate.c b/arch/loongarch/kvm/emulate.c index 251e5ba3279e22db745c2ae9291b9c7bd8892473..1890cb85c87a182ec410fc58c7ee2e517966cd3c 100644 --- a/arch/loongarch/kvm/emulate.c +++ b/arch/loongarch/kvm/emulate.c @@ -41,7 +41,7 @@ int _kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { struct kvm_run *run = vcpu->run; unsigned int rd, op8, opcode; @@ -164,7 +164,7 @@ int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) } -int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { unsigned int op8, opcode, rd; int ret = 0; diff --git a/arch/loongarch/kvm/entry.S b/arch/loongarch/kvm/entry.S index bc07154b08a084d0119ce298ced1fcd1c8881438..be53cf896a1d5263c811b856299eff0919afd5ac 100644 --- a/arch/loongarch/kvm/entry.S +++ b/arch/loongarch/kvm/entry.S @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ @@ -51,7 +51,7 @@ .macro kvm_switch_to_guest KVM_ARCH GPRNUM tmp tmp1 /* set host excfg.VS=0, all exceptions share one exception entry */ csrrd \tmp, KVM_CSR_ECFG - bstrins.w \tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), 
KVM_ECFG_VS_SHIFT + bstrins.w \tmp, zero, KEVS_KEVW, KVM_ECFG_VS_SHIFT csrwr \tmp, KVM_CSR_ECFG /* Load up the new EENTRY */ @@ -77,11 +77,9 @@ /* Mix GID and RID */ csrrd \tmp1, KVM_CSR_GSTAT - bstrpick.w \tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), - KVM_GSTAT_GID_SHIFT + bstrpick.w \tmp1, \tmp1, KGGS_KGGW, KVM_GSTAT_GID_SHIFT csrrd \tmp, KVM_CSR_GTLBC - bstrins.w \tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), - KVM_GTLBC_TGID_SHIFT + bstrins.w \tmp, \tmp1, KGTS_KGTW, KVM_GTLBC_TGID_SHIFT csrwr \tmp, KVM_CSR_GTLBC /* @@ -198,13 +196,11 @@ SYM_FUNC_START(kvm_exit_entry) /* Clear GTLBC.TGID field */ csrrd t0, KVM_CSR_GTLBC - bstrins.w t0, zero, KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1, - KVM_GTLBC_TGID_SHIFT + bstrins.w t0, zero, KGTS_KGTW, KVM_GTLBC_TGID_SHIFT csrwr t0, KVM_CSR_GTLBC /* Enable Address Map mode */ - ori t0, zero, (1 << KVM_CRMD_DACM_SHIFT)|(1 << KVM_CRMD_DACF_SHIFT) | KVM_CRMD_PG - |PLV_KERN + ori t0, zero, KCDS_PK csrwr t0, KVM_CSR_CRMD KVM_LONG_L tp, a2, KVM_ARCH_HGP diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ea8989f2ccc60f77ffa826a825f85056908d6430..cb070abbc6504b3a48071d6633d6203b68b882c4 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -20,7 +20,6 @@ #include #include "kvmcpu.h" #include - #include "trace.h" #include "kvm_compat.h" #include "kvmcsr.h" @@ -48,7 +47,7 @@ static int _kvm_fault_ni(struct kvm_vcpu *vcpu) return RESUME_HOST; } -static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +static int _kvm_handle_csr(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { enum emulation_result er = EMULATE_DONE; unsigned int rd, rj, csrid; @@ -85,7 +84,7 @@ static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) return er; } -static int _kvm_emu_cache(struct kvm_vcpu *vcpu, larch_inst inst) +static int _kvm_emu_cache(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { return EMULATE_DONE; } @@ -94,7 +93,7 @@ static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; struct kvm_run *run = vcpu->run; - larch_inst inst; + union loongarch_instruction inst; unsigned long curr_pc; int rd, rj; unsigned int index; @@ -181,7 +180,7 @@ static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) static int _kvm_check_hypcall(struct kvm_vcpu *vcpu) { enum emulation_result ret; - larch_inst inst; + union loongarch_instruction inst; unsigned long curr_pc; unsigned int code; @@ -385,7 +384,7 @@ static int _kvm_handle_read_fault(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; ulong badv = vcpu->arch.badv; - larch_inst inst; + union loongarch_instruction inst; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -423,7 +422,7 @@ static int _kvm_handle_write_fault(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; ulong badv = vcpu->arch.badv; - larch_inst inst; + union loongarch_instruction inst; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -468,7 +467,6 @@ static int _kvm_handle_debug(struct kvm_vcpu *vcpu) return RESUME_HOST; } - static exit_handle_fn _kvm_fault_tables[KVM_INT_START] = { [KVM_EXCCODE_TLBL] = _kvm_handle_read_fault, [KVM_EXCCODE_TLBS] = _kvm_handle_write_fault, diff --git a/arch/loongarch/kvm/hypcall.c b/arch/loongarch/kvm/hypcall.c index ee156ea0e3219407af78d180a7cf511ea5a0b09b..7632c8f4d6596adcfa29d90b8dd68b9aea52ab75 100644 --- a/arch/loongarch/kvm/hypcall.c +++ b/arch/loongarch/kvm/hypcall.c @@ -99,7 +99,7 @@ int _kvm_handle_pv_hcall(struct 
kvm_vcpu *vcpu) break; }; - vcpu->arch.gprs[KVM_REG_V0] = hyp_ret; + vcpu->arch.gprs[KVM_REG_A0] = hyp_ret; return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/intc/ls3a_ext_irq.c b/arch/loongarch/kvm/intc/ls3a_ext_irq.c index 9f860e75f060008eb0ab48b8424e5f8cc193147c..b478ea61786d416cd12afd967118acd51dbe7463 100644 --- a/arch/loongarch/kvm/intc/ls3a_ext_irq.c +++ b/arch/loongarch/kvm/intc/ls3a_ext_irq.c @@ -13,6 +13,8 @@ #define ls3a_ext_irq_lock(s, flags) spin_lock_irqsave(&s->lock, flags) #define ls3a_ext_irq_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags) +extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_loongarch_interrupt *irq); void ext_deactive_core_isr(struct kvm *kvm, int irq_num, int vcpu_id) { int ipnum; @@ -28,8 +30,8 @@ void ext_deactive_core_isr(struct kvm *kvm, int irq_num, int vcpu_id) bitmap_clear((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1); found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0); - kvm_debug("vcpu_id %d irqnum %d found:0x%lx ipnum %d down\n", vcpu_id, irq_num, - found1, ipnum); + kvm_debug("vcpu_id %d irqnum %d found:0x%lx ipnum %d down\n", + vcpu_id, irq_num, found1, ipnum); if (found1 == EXTIOI_IRQS) { irq.cpu = vcpu_id; irq.irq = -(ipnum + 2); /* IP2~IP5 */ @@ -60,8 +62,9 @@ void ext_irq_update_core(struct kvm *kvm, int irq_num, int level) vcpu_id = state->ext_sw_coremap[irq_num]; ipnum = state->ext_sw_ipmap[irq_num]; - if (vcpu_id > (nrcpus - 1)) + if (vcpu_id > (nrcpus - 1)) { vcpu_id = 0; + } if (level == 1) { if (test_bit(irq_num, (void *)state->ext_en.reg_u8) == false) @@ -362,7 +365,7 @@ static int ls3a_ext_intctl_writeb(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8; i++) { - if ((old_data_u8 & mask)&&(val_data_u8 & mask)) + if ((old_data_u8 & mask) && (val_data_u8 & mask)) ext_irq_update_core(kvm, i + reg_count * 8, 0); mask = mask << 1; } @@ -475,7 +478,7 @@ static int ls3a_ext_intctl_writew(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8 * sizeof(old_data_u32); i++) { - if ((old_data_u32 & mask)&&(val_data_u32 & mask)) + if ((old_data_u32 & mask) && (val_data_u32 & mask)) ext_irq_update_core(kvm, i + reg_count * 32, 0); mask = mask << 1; } @@ -578,13 +581,12 @@ static int ls3a_ext_intctl_writel(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8 * sizeof(old_data_u64); i++) { - if ((old_data_u64 & mask)&&(val_data_u64 & mask)) + if ((old_data_u64 & mask) && (val_data_u64 & mask)) ext_irq_update_core(kvm, i + reg_count * 64, 0); mask = mask << 1; } } else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) { int bits; - vcpu_id = (offset >> 8) & 0xff; reg_count = (offset & 0x1f) / 8; @@ -820,7 +822,6 @@ int kvm_get_ls3a_extirq(struct kvm *kvm, struct kvm_loongarch_ls3a_extirq_state struct ls3a_kvm_extirq *v_extirq = ls3a_ext_irqchip(kvm); struct kvm_ls3a_extirq_state *extirq_state = &(v_extirq->ls3a_ext_irq); unsigned long flags; - if (!v_extirq) return -EINVAL; @@ -838,7 +839,6 @@ int kvm_set_ls3a_extirq(struct kvm *kvm, struct kvm_loongarch_ls3a_extirq_state struct ls3a_kvm_extirq *v_extirq = ls3a_ext_irqchip(kvm); struct kvm_ls3a_extirq_state *extirq_state = &(v_extirq->ls3a_ext_irq); unsigned long flags; - if (!v_extirq) return -EINVAL; diff --git a/arch/loongarch/kvm/intc/ls3a_ext_irq.h b/arch/loongarch/kvm/intc/ls3a_ext_irq.h index aac5fe40b5c290c143783d5421c074edc904de43..4976b2b217d3e572af4927cf75a43d20f806ac95 100644 --- a/arch/loongarch/kvm/intc/ls3a_ext_irq.h +++ b/arch/loongarch/kvm/intc/ls3a_ext_irq.h @@ -93,7 +93,7 
@@ struct kvm_ls3a_extirq_state { uint8_t ext_sw_ipmap[EXTIOI_IRQS]; uint8_t ext_sw_coremap[EXTIOI_IRQS]; uint8_t ext_sw_ipisr[KVM_MAX_VCPUS][LS3A_INTC_IP][EXTIOI_IRQS_BITMAP_SIZE]; -} LS3AExtirqState; +}; struct ls3a_kvm_extirq { spinlock_t lock; @@ -128,5 +128,4 @@ void msi_irq_handler(struct kvm *kvm, int irq, int level); int kvm_setup_ls3a_extirq(struct kvm *kvm); int kvm_enable_ls3a_extirq(struct kvm *kvm, bool enable); void kvm_dump_ls3a_extirq_state(struct seq_file *m, struct ls3a_kvm_extirq *irqchip); -int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_loongarch_interrupt *irq); #endif diff --git a/arch/loongarch/kvm/intc/ls3a_ipi.c b/arch/loongarch/kvm/intc/ls3a_ipi.c index 8e6f145dee2be0e4597232567cc8f27c65ed0220..c194bf45de5fdd86b6627218207960f3db6fe515 100644 --- a/arch/loongarch/kvm/intc/ls3a_ipi.c +++ b/arch/loongarch/kvm/intc/ls3a_ipi.c @@ -11,11 +11,13 @@ #define ls3a_gipi_lock(s, flags) spin_lock_irqsave(&s->lock, flags) #define ls3a_gipi_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags) +extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_loongarch_interrupt *irq); int kvm_helper_send_ipi(struct kvm_vcpu *vcpu, unsigned int cpu, unsigned int action) { struct kvm *kvm = vcpu->kvm; struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); unsigned long flags; struct kvm_loongarch_interrupt irq; @@ -39,7 +41,7 @@ static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr, { uint64_t data, offset; struct kvm_loongarch_interrupt irq; - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); uint32_t cpu, action_data; struct kvm *kvm; void *pbuf; @@ -123,7 +125,7 @@ static uint64_t ls3a_gipi_readl(struct ls3a_kvm_ipi *ipi, uint64_t offset; uint64_t ret = 0; - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); uint32_t cpu; void *pbuf; @@ -171,10 +173,10 @@ static int kvm_ls3a_ipi_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) { struct ls3a_kvm_ipi *ipi; - ipi_io_device *ipi_device; + struct ipi_io_device *ipi_device; unsigned long flags; - ipi_device = container_of(dev, ipi_io_device, device); + ipi_device = container_of(dev, struct ipi_io_device, device); ipi = ipi_device->ipi; ipi->kvm->stat.pip_write_exits++; @@ -190,10 +192,10 @@ static int kvm_ls3a_ipi_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) { struct ls3a_kvm_ipi *ipi; - ipi_io_device *ipi_device; + struct ipi_io_device *ipi_device; unsigned long flags; - ipi_device = container_of(dev, ipi_io_device, device); + ipi_device = container_of(dev, struct ipi_io_device, device); ipi = ipi_device->ipi; ipi->kvm->stat.pip_read_exits++; @@ -262,11 +264,11 @@ int kvm_create_ls3a_ipi(struct kvm *kvm) int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) { struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *ipi_state = &(ipi->ls3a_gipistate); + struct gipiState *ipi_state = &(ipi->ls3a_gipistate); unsigned long flags; ls3a_gipi_lock(ipi, flags); - memcpy(state, ipi_state, sizeof(gipiState)); + memcpy(state, ipi_state, sizeof(struct gipiState)); ls3a_gipi_unlock(ipi, flags); return 0; } @@ -274,14 +276,14 @@ int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) int kvm_set_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) { struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *ipi_state = &(ipi->ls3a_gipistate); + struct gipiState 
*ipi_state = &(ipi->ls3a_gipistate); unsigned long flags; if (!ipi) return -EINVAL; ls3a_gipi_lock(ipi, flags); - memcpy(ipi_state, state, sizeof(gipiState)); + memcpy(ipi_state, state, sizeof(struct gipiState)); ls3a_gipi_unlock(ipi, flags); return 0; } diff --git a/arch/loongarch/kvm/intc/ls3a_ipi.h b/arch/loongarch/kvm/intc/ls3a_ipi.h index 60c63f9eda88c792aa42a0a12341c614c8cd3c7d..75fe821defc24d4506d3f186110180df88e2f6b4 100644 --- a/arch/loongarch/kvm/intc/ls3a_ipi.h +++ b/arch/loongarch/kvm/intc/ls3a_ipi.h @@ -18,11 +18,11 @@ struct gipi_single { uint32_t set; uint32_t clear; uint64_t buf[4]; -} gipi_single; +}; struct gipiState { - gipi_single core[KVM_MAX_VCPUS]; -} gipiState; + struct gipi_single core[KVM_MAX_VCPUS]; +}; struct ls3a_kvm_ipi; @@ -30,14 +30,14 @@ struct ipi_io_device { struct ls3a_kvm_ipi *ipi; struct kvm_io_device device; int nodeNum; -} ipi_io_device; +}; struct ls3a_kvm_ipi { spinlock_t lock; struct kvm *kvm; - gipiState ls3a_gipistate; + struct gipiState ls3a_gipistate; int nodeNum; - ipi_io_device dev_ls3a_ipi; + struct ipi_io_device dev_ls3a_ipi; }; #define SMP_MAILBOX (LOONGSON_VIRT_REG_BASE + 0x0000) diff --git a/arch/loongarch/kvm/intc/ls7a_irq.c b/arch/loongarch/kvm/intc/ls7a_irq.c index f863a8e829d4883794f615bfb594e800cf330294..5155de694f4e312431ace3a96c15afa50f1c89f0 100644 --- a/arch/loongarch/kvm/intc/ls7a_irq.c +++ b/arch/loongarch/kvm/intc/ls7a_irq.c @@ -12,7 +12,6 @@ void ls7a_ioapic_lock(struct ls7a_kvm_ioapic *s, unsigned long *flags) { unsigned long tmp; - spin_lock_irqsave(&s->lock, tmp); *flags = tmp; } @@ -20,7 +19,6 @@ void ls7a_ioapic_lock(struct ls7a_kvm_ioapic *s, unsigned long *flags) void ls7a_ioapic_unlock(struct ls7a_kvm_ioapic *s, unsigned long *flags) { unsigned long tmp; - tmp = *flags; spin_unlock_irqrestore(&s->lock, tmp); } @@ -102,7 +100,6 @@ int kvm_ls7a_ioapic_set_irq(struct kvm *kvm, int irq, int level) struct ls7a_kvm_ioapic *s; struct kvm_ls7a_ioapic_state *state; uint64_t mask = 1ULL << irq; - s = ls7a_ioapic_irqchip(kvm); state = &s->ls7a_ioapic; BUG_ON(irq < 0 || irq >= LS7A_IOAPIC_NUM_PINS); @@ -142,11 +139,13 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, struct kvm_ls7a_ioapic_state *state; int64_t offset_tmp; uint64_t offset; - uint64_t data, old; + uint64_t data, old, himask, lowmask; offset = addr & 0xfff; kvm = s->kvm; state = &(s->ls7a_ioapic); + lowmask = 0xFFFFFFFFUL; + himask = lowmask << 32; if (offset & (len - 1)) { pr_info("%s(%d):unaligned address access %llx size %d\n", @@ -194,6 +193,80 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); break; } + } else if (len == 4) { + data = *(uint32_t *)val; + switch (offset) { + case LS7A_INT_MASK_OFFSET: + old = state->int_mask & lowmask; + state->int_mask = (state->int_mask & himask) | data; + if (old & ~data) + kvm_ls7a_ioapic_raise(kvm, old & ~data); + if (~old & data) + kvm_ls7a_ioapic_lower(kvm, ~old & data); + break; + case LS7A_INT_MASK_OFFSET + 4: + data = data << 32; + old = state->int_mask & himask; + state->int_mask = (state->int_mask & lowmask) | data; + if (old & ~data) + kvm_ls7a_ioapic_raise(kvm, old & ~data); + if (~old & data) + kvm_ls7a_ioapic_lower(kvm, ~old & data); + break; + case LS7A_INT_STATUS_OFFSET: + state->intisr = (state->intisr & himask) | data; + break; + case LS7A_INT_STATUS_OFFSET + 4: + data = data << 32; + state->intisr = (state->intisr & lowmask) | data; + break; + case LS7A_INT_EDGE_OFFSET: + state->intedge = (state->intedge & 
himask) | data; + break; + case LS7A_INT_EDGE_OFFSET + 4: + data = data << 32; + state->intedge = (state->intedge & lowmask) | data; + break; + case LS7A_INT_CLEAR_OFFSET: + /* + * only clear edge triggered irq on writing INTCLR reg + * no effect on level triggered irq + */ + data = data & state->intedge; + state->intirr &= ~data; + kvm_ls7a_ioapic_lower(kvm, data); + state->intisr &= ~data; + break; + case LS7A_INT_CLEAR_OFFSET + 4: + data = data << 32; + data = data & state->intedge; + state->intirr &= ~data; + kvm_ls7a_ioapic_lower(kvm, data); + state->intisr &= ~data; + break; + case LS7A_INT_POL_OFFSET: + state->int_polarity = (state->int_polarity & himask) | data; + break; + case LS7A_INT_POL_OFFSET+4: + data = data << 32; + state->int_polarity = (state->int_polarity & lowmask) | data; + break; + case LS7A_HTMSI_EN_OFFSET: + state->htmsi_en = (state->htmsi_en & himask) | data; + break; + case LS7A_HTMSI_EN_OFFSET+4: + data = data << 32; + state->htmsi_en = (state->htmsi_en & lowmask) | data; + break; + case LS7A_AUTO_CTRL0_OFFSET: + case LS7A_AUTO_CTRL0_OFFSET+4: + case LS7A_AUTO_CTRL1_OFFSET: + case LS7A_AUTO_CTRL1_OFFSET+4: + break; + default: + WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); + break; + } } else if (len == 1) { data = *(unsigned char *)val; if (offset >= LS7A_HTMSI_VEC_OFFSET) { @@ -243,11 +316,13 @@ static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s, uint64_t offset, offset_tmp; struct kvm *kvm; struct kvm_ls7a_ioapic_state *state; - uint64_t result = 0; + uint64_t result = 0, lowmask, himask; state = &(s->ls7a_ioapic); kvm = s->kvm; offset = addr & 0xfff; + lowmask = 0xFFFFFFFFUL; + himask = lowmask << 32; if (offset & (len - 1)) { pr_info("%s(%d):unaligned address access %llx size %d\n", __func__, __LINE__, addr, len); @@ -284,6 +359,60 @@ static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s, } if (val != NULL) *(uint64_t *)val = result; + } else if (len == 4) { + switch (offset) { + case LS7A_INT_MASK_OFFSET: + result = state->int_mask & lowmask; + break; + case LS7A_INT_MASK_OFFSET + 4: + result = state->int_mask & himask; + result = result >> 32; + break; + case LS7A_INT_STATUS_OFFSET: + result = state->intisr & (~state->int_mask) & lowmask; + break; + case LS7A_INT_STATUS_OFFSET + 4: + result = state->intisr & (~state->int_mask) & himask; + result = result >> 32; + break; + case LS7A_INT_EDGE_OFFSET: + result = state->intedge & lowmask; + break; + case LS7A_INT_EDGE_OFFSET + 4: + result = state->intedge & himask; + result = result >> 32; + break; + case LS7A_INT_POL_OFFSET: + result = state->int_polarity & lowmask; + break; + case LS7A_INT_POL_OFFSET + 4: + result = state->int_polarity & himask; + result = result >> 32; + break; + case LS7A_HTMSI_EN_OFFSET: + result = state->htmsi_en & lowmask; + break; + case LS7A_HTMSI_EN_OFFSET + 4: + result = state->htmsi_en & himask; + result = result >> 32; + break; + case LS7A_AUTO_CTRL0_OFFSET: + case LS7A_AUTO_CTRL0_OFFSET + 4: + case LS7A_AUTO_CTRL1_OFFSET: + case LS7A_AUTO_CTRL1_OFFSET + 4: + break; + case LS7A_INT_ID_OFFSET: + result = LS7A_INT_ID_VAL; + break; + case LS7A_INT_ID_OFFSET + 4: + result = LS7A_INT_ID_VER; + break; + default: + WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); + break; + } + if (val != NULL) + *(uint32_t *)val = result; } else if (len == 1) { if (offset >= LS7A_HTMSI_VEC_OFFSET) { offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET; @@ -440,7 +569,6 @@ int kvm_set_ls7a_ioapic(struct kvm *kvm, struct ls7a_ioapic_state *state) void 
kvm_destroy_ls7a_ioapic(struct kvm *kvm) { struct ls7a_kvm_ioapic *vpic = kvm->arch.v_ioapic; - if (!vpic) return; kvm_io_bus_unregister_dev(vpic->kvm, KVM_MMIO_BUS, diff --git a/arch/loongarch/kvm/intc/ls7a_irq.h b/arch/loongarch/kvm/intc/ls7a_irq.h index 5caabbcdfeaf6b862d1b30a920c4f7ad834edc27..50fa2fd340a9397107c88ed348e7fde15efc10d3 100644 --- a/arch/loongarch/kvm/intc/ls7a_irq.h +++ b/arch/loongarch/kvm/intc/ls7a_irq.h @@ -68,7 +68,7 @@ struct kvm_ls7a_ioapic_state { * 0 for high level tirgger */ u64 int_polarity; -} LS7AApicState; +}; struct ls7a_kvm_ioapic { spinlock_t lock; diff --git a/arch/loongarch/kvm/irq.h b/arch/loongarch/kvm/irq.h new file mode 100644 index 0000000000000000000000000000000000000000..344ba5ebc4b5eb8127013e1f5a5e77d1602e3962 --- /dev/null +++ b/arch/loongarch/kvm/irq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#ifndef __LOONGHARCH_KVM_IRQ_H__ +#define __LOONGHARCH_KVM_IRQ_H__ + +static inline int irqchip_in_kernel(struct kvm *kvm) +{ + return kvm->arch.v_ioapic ? 1 : 0; +} + +#endif diff --git a/arch/loongarch/kvm/kvm_compat.c b/arch/loongarch/kvm/kvm_compat.c index f9dcfd2b57b8a9a034b05dc163a3e1a6363d04b8..39c1f995aa4bd5b21402419e7dd0c05285ef0be1 100644 --- a/arch/loongarch/kvm/kvm_compat.c +++ b/arch/loongarch/kvm/kvm_compat.c @@ -9,34 +9,11 @@ extern int _kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_arch_check_processor_compat(void) +int kvm_arch_check_processor_compat(void *opaque) { return 0; } -int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - bool is_dirty = false; - int r; - - mutex_lock(&kvm->slots_lock); - - r = kvm_clear_dirty_log_protect(kvm, log, &is_dirty); - - if (is_dirty) { - slots = kvm_memslots(kvm); - memslot = id_to_memslot(slots, log->slot); - - /* Let implementation handle TLB/GVA invalidation */ - kvm_flush_remote_tlbs(kvm); - } - - mutex_unlock(&kvm->slots_lock); - return r; -} - int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { return _kvm_set_spte_hva(kvm, hva, pte); diff --git a/arch/loongarch/kvm/kvm_compat.h b/arch/loongarch/kvm/kvm_compat.h index bbf73d4b2d8218c481dbc1cc03c967d7caf3e88f..5c724b3758dd89f7142034bd0c874693f9696a02 100644 --- a/arch/loongarch/kvm/kvm_compat.h +++ b/arch/loongarch/kvm/kvm_compat.h @@ -118,6 +118,12 @@ #define KVM_ESTAT_IS_WIDTH 15 #define KVM_ESTAT_IS (_ULCAST_(0x7fff) << KVM_ESTAT_IS_SHIFT) +#define KEVS_KEVW (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1) +#define KGGS_KGGW (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1) +#define KGTS_KGTW (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1) +#define KCP_PK (KVM_CRMD_PG | PLV_KERN) +#define KCDS_PK ((1 << KVM_CRMD_DACM_SHIFT) | (1 << KVM_CRMD_DACF_SHIFT) | KCP_PK) + #define KVM_CSR_ERA 0x6 /* ERA */ #define KVM_CSR_BADV 0x7 /* Bad virtual address */ #define KVM_CSR_BADI 0x8 /* Bad instruction */ diff --git a/arch/loongarch/kvm/kvmcpu.h b/arch/loongarch/kvm/kvmcpu.h index 936dfc4593c6c22dae4ca1fbb0f6b2b316d058ab..66a15c7dd500700820da5ae24f111ab0a928f24e 100644 --- a/arch/loongarch/kvm/kvmcpu.h +++ b/arch/loongarch/kvm/kvmcpu.h @@ -67,11 +67,10 @@ #define KVM_LOONGSON_IRQ_CPU_FIQ 1 #define KVM_LOONGSON_CPU_IP_NUM 8 -union loongarch_instruction larch_inst; typedef int (*exit_handle_fn)(struct kvm_vcpu *); -int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); -int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, 
larch_inst inst); +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, union loongarch_instruction inst); +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, union loongarch_instruction inst); int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); int _kvm_emu_idle(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/kvmcsr.h b/arch/loongarch/kvm/kvmcsr.h index 63676a4612c36cccaedad4c1da37e165c3f86890..4c92cf22a6959d744541526fe44ff0369ee4da83 100644 --- a/arch/loongarch/kvm/kvmcsr.h +++ b/arch/loongarch/kvm/kvmcsr.h @@ -19,7 +19,7 @@ unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid); void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val); void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long csr_mask, unsigned long val); -int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); +int _kvm_emu_iocsr(union loongarch_instruction inst, struct kvm_run *run, struct kvm_vcpu *vcpu); static inline void kvm_save_hw_gcsr(struct loongarch_csrs *csr, u32 gid) { @@ -50,44 +50,10 @@ static inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, u32 gid, unsig unsigned long val) { unsigned long _mask = mask; - csr->csrs[gid] &= ~_mask; csr->csrs[gid] |= val & _mask; } - -static inline int GET_HW_GCSR(id, csrid, v) -{ - if (csrid == id) { - *v = (long)kvm_read_hw_gcsr(csrid); - return 0; - } -} - -static inline int GET_SW_GCSR(csr, id, csrid, v) -{ - if (csrid == id) { - *v = kvm_read_sw_gcsr(csr, id); - return 0; - } -} - -static inline int SET_HW_GCSR(csr, id, csrid, v) -{ - if (csrid == id) { - kvm_write_hw_gcsr(csr, csrid, *v); - return 0; - } -} - -static inline int SET_SW_GCSR(csr, id, csrid, v) -{ - if (csrid == id) { - kvm_write_sw_gcsr(csr, csrid, *v); - return 0; - } -} - int _kvm_init_iocsr(struct kvm *kvm); int _kvm_set_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); @@ -96,17 +62,4 @@ int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); KVM_PERFCTRL_PLV1 | \ KVM_PERFCTRL_PLV2 | \ KVM_PERFCTRL_PLV3) - -static inline void CASE_WRITE_HW_PMU(vcpu, csr, id, csrid, v) -{ - if (csrid == id) { - if (v & KVM_PMU_PLV_ENABLE) { - kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); - kvm_write_hw_gcsr(csr, csrid, v | KVM_PERFCTRL_GMOD); - vcpu->arch.aux_inuse |= KVM_LARCH_PERF; - } else - kvm_write_sw_gcsr(csr, csrid, v); - } -} - #endif /* __LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/kvm/loongarch.c b/arch/loongarch/kvm/loongarch.c index 9907f377055ac0d58c77af92e9d21be6a2e8fe70..e76a6635c4cbe658e0f7f404d78bce6862e44340 100644 --- a/arch/loongarch/kvm/loongarch.c +++ b/arch/loongarch/kvm/loongarch.c @@ -28,6 +28,7 @@ #include "kvmcpu.h" #include #include +#include #include "intc/ls3a_ipi.h" #include "intc/ls7a_irq.h" @@ -89,9 +90,40 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VM_STAT("set_ls7a_ioapic", set_ls7a_ioapic), VM_STAT("get_ls7a_ioapic", get_ls7a_ioapic), VM_STAT("set_ls3a_ext_irq", set_ls3a_ext_irq), + VM_STAT("get_ls3a_ext_irq", get_ls3a_ext_irq), + VM_STAT("ls3a_ext_irq", trigger_ls3a_ext_irq), {NULL} }; +static const struct trace_print_flags kvm_trace_symbol_exit_types[] = { + { KVM_TRACE_EXIT_INT, "Interrupt" }, + { KVM_TRACE_EXIT_TLBLD, "TLB (LD)" }, + { KVM_TRACE_EXIT_TLBST, "TLB (ST)" }, + { KVM_TRACE_EXIT_TLBI, "TLB Ifetch" }, + { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, + { KVM_TRACE_EXIT_TLBRI, 
"TLB RI" }, + { KVM_TRACE_EXIT_TLBXI, "TLB XI" }, + { KVM_TRACE_EXIT_TLBPE, "TLB Previlege Error" }, + { KVM_TRACE_EXIT_ADDE, "Address Error" }, + { KVM_TRACE_EXIT_UNALIGN, "Address unalign" }, + { KVM_TRACE_EXIT_ODB, "Out boundary" }, + { KVM_TRACE_EXIT_SYSCALL, "System Call" }, + { KVM_TRACE_EXIT_BP, "Breakpoint" }, + { KVM_TRACE_EXIT_INE, "Reserved Inst" }, + { KVM_TRACE_EXIT_IPE, "Inst prev error" }, + { KVM_TRACE_EXIT_FPDIS, "FPU disable" }, + { KVM_TRACE_EXIT_LSXDIS, "LSX disable" }, + { KVM_TRACE_EXIT_LASXDIS, "LASX disable" }, + { KVM_TRACE_EXIT_FPE, "FPE" }, + { KVM_TRACE_EXIT_WATCH, "DEBUG" }, + { KVM_TRACE_EXIT_GSPR, "GSPR" }, + { KVM_TRACE_EXIT_HC, "Hypercall" }, + { KVM_TRACE_EXIT_GCM, "CSR Mod" }, + { KVM_TRACE_EXIT_IDLE, "IDLE" }, + { KVM_TRACE_EXIT_CACHE, "CACHE" }, + { KVM_TRACE_EXIT_SIGNAL, "Signal" }, +}; + bool kvm_trace_guest_mode_change; static struct kvm_context __percpu *vmcs; @@ -145,11 +177,11 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu) if (st->version & 1) st->version += 1; /* first time write, random junk */ st->version += 1; - smp_wmb();/*Memory barrier for multiprocessors*/ + smp_wmb(); st->steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; - smp_wmb();/*Memory barrier for multiprocessors*/ + smp_wmb(); st->version += 1; kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); @@ -245,7 +277,6 @@ int kvm_arch_hardware_enable(void) gcfg |= KVM_GCFG_MATC_ROOT; gcfg |= KVM_GCFG_TIT; kvm_write_csr_gcfg(gcfg); - kvm_flush_tlb_all(); /* Enable using TGID */ @@ -508,13 +539,13 @@ static enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) return kvm_count_timeout(vcpu); } - static void _kvm_vcpu_init(struct kvm_vcpu *vcpu) { int i; for_each_possible_cpu(i) vcpu->arch.vpid[i] = 0; + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); vcpu->arch.swtimer.function = kvm_swtimer_wakeup; @@ -522,7 +553,6 @@ static void _kvm_vcpu_init(struct kvm_vcpu *vcpu) vcpu->arch.lsx_enabled = true; } - int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { vcpu->arch.host_eentry = kvm_csr_readq(KVM_CSR_EENTRY); @@ -530,12 +560,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.vcpu_run = kvm_enter_guest; vcpu->arch.handle_exit = _kvm_handle_exit; vcpu->arch.host_ecfg = (kvm_read_csr_ecfg() & KVM_ECFG_VS); + /* * kvm all exceptions share one exception entry, and host <-> guest switch * also switch excfg.VS field, keep host excfg.VS info here */ vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); - if (!vcpu->arch.csr) return -ENOMEM; @@ -543,7 +573,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_exec_cpu = -1; _kvm_vcpu_init(vcpu); - return 0; } @@ -551,6 +580,7 @@ static void _kvm_vcpu_uninit(struct kvm_vcpu *vcpu) { int cpu; struct kvm_context *context; + /* * If the VCPU is freed and reused as another VCPU, we don't want the * matching pointer wrongly hanging around in last_vcpu. 
@@ -574,7 +604,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_release_pfn(cache->pfn, cache->dirty, cache); kfree(vcpu->arch.csr); } - #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, @@ -612,7 +641,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu->mmio_needed = 0; } else if (vcpu->arch.is_hypcall) { /* set return value for hypercall v0 register */ - vcpu->arch.gprs[KVM_REG_V0] = run->hypercall.ret; + vcpu->arch.gprs[KVM_REG_A0] = run->hypercall.ret; vcpu->arch.is_hypcall = 0; } @@ -1024,7 +1053,6 @@ static int _kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ret = -EINVAL; if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; - ret = get_user(v, uaddr64); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; @@ -1130,7 +1158,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchi r = kvm_get_ls7a_ioapic(kvm, (void *)chip->data); break; case KVM_IRQCHIP_LS3A_GIPI: - if (dlen != sizeof(gipiState)) { + if (dlen != sizeof(struct gipiState)) { kvm_err("get gipi state err dlen:%d\n", dlen); goto dlen_err; } @@ -1173,7 +1201,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchi r = kvm_set_ls7a_ioapic(kvm, (void *)chip->data); break; case KVM_IRQCHIP_LS3A_GIPI: - if (dlen != sizeof(gipiState)) { + if (dlen != sizeof(struct gipiState)) { kvm_err("set gipi state err dlen:%d\n", dlen); goto dlen_err; } @@ -1352,7 +1380,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, } case KVM_CHECK_EXTENSION: { unsigned int ext; - if (copy_from_user(&ext, argp, sizeof(ext))) return -EFAULT; switch (ext) { @@ -1372,7 +1399,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, { int i; struct kvm_loongarch_vcpu_state vcpu_state; - r = -EFAULT; vcpu_state.online_vcpus = vcpu->kvm->arch.online_vcpus; @@ -1393,7 +1419,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, { int i; struct kvm_loongarch_vcpu_state vcpu_state; - r = -EFAULT; if (copy_from_user(&vcpu_state, argp, sizeof(struct kvm_loongarch_vcpu_state))) @@ -1487,7 +1512,6 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) case KVM_GET_IRQCHIP: { struct loongarch_kvm_irqchip *kchip; struct loongarch_kvm_irqchip uchip; - if (copy_from_user(&uchip, argp, sizeof(struct loongarch_kvm_irqchip))) goto out; kchip = memdup_user(argp, uchip.len); @@ -1512,7 +1536,6 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) case KVM_SET_IRQCHIP: { struct loongarch_kvm_irqchip *kchip; struct loongarch_kvm_irqchip uchip; - if (copy_from_user(&uchip, argp, sizeof(struct loongarch_kvm_irqchip))) goto out; @@ -1903,10 +1926,8 @@ void kvm_own_lasx(struct kvm_vcpu *vcpu) * Enable FP if enabled in guest, since we're restoring FP context * anyway. 
*/ - if (_kvm_guest_has_lsx(&vcpu->arch)) { - /* Enable LSX for guest */ + if (_kvm_guest_has_lsx(&vcpu->arch)) kvm_set_csr_euen(KVM_EUEN_LSXEN); - } /* * Enable FPU if enabled in guest, since we're restoring FPU context @@ -1965,6 +1986,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); if (cpu_has_lasx && (vcpu->arch.aux_inuse & KVM_LARCH_LASX)) { + #ifdef CONFIG_CPU_HAS_LASX kvm_save_lasx(vcpu); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_LSX_LASX); @@ -1973,12 +1995,14 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) disable_lasx(); disable_lsx(); #endif + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) kvm_clear_csr_euen(KVM_EUEN_FPEN); vcpu->arch.aux_inuse &= ~(KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX); } else if (cpu_has_lsx && vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + #ifdef CONFIG_CPU_HAS_LASX kvm_save_lsx(vcpu); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_LSX); @@ -1986,6 +2010,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) /* Disable LSX & FPU */ disable_lsx(); #endif + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) kvm_clear_csr_euen(KVM_EUEN_FPEN); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 6d10eafc783ee5aa01cf712f1abd05da78c8fca4..8aee8f2d8c15aac4488ca2de0a4e5926ed11634b 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -13,6 +13,10 @@ #include #include "kvm_compat.h" +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +#define KVM_HUGE_TLB_SUPPORT +#endif + /* * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels * for which pages need to be cached. @@ -34,7 +38,7 @@ static int kvm_tlb_flush_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) static inline int kvm_pmd_huge(pmd_t pmd) { -#ifdef CONFIG_LOONGARCH_HUGE_TLB_SUPPORT +#ifdef KVM_HUGE_TLB_SUPPORT return (pmd_val(pmd) & _PAGE_HUGE) != 0; #else return 0; @@ -43,15 +47,16 @@ static inline int kvm_pmd_huge(pmd_t pmd) static inline int kvm_pud_huge(pud_t pud) { -#ifdef CONFIG_LOONGARCH_HUGE_TLB_SUPPORT +#ifdef KVM_HUGE_TLB_SUPPORT return (pud_val(pud) & _PAGE_HUGE) != 0; #else return 0; #endif +} static inline pmd_t kvm_pmd_mkhuge(pmd_t pmd) { -#ifdef CONFIG_LOONGARCH_HUGE_TLB_SUPPORT +#ifdef KVM_HUGE_TLB_SUPPORT #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_mkhuge(pmd); #else @@ -67,7 +72,7 @@ static inline pmd_t kvm_pmd_mkhuge(pmd_t pmd) static inline pmd_t kvm_pmd_mkclean(pmd_t pmd) { -#ifdef CONFIG_LOONGARCH_HUGE_TLB_SUPPORT +#ifdef KVM_HUGE_TLB_SUPPORT #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_mkclean(pmd); #else @@ -83,7 +88,7 @@ static inline pmd_t kvm_pmd_mkclean(pmd_t pmd) static inline pmd_t kvm_pmd_mkold(pmd_t pmd) { -#ifdef CONFIG_LOONGARCH_HUGE_TLB_SUPPORT +#ifdef KVM_HUGE_TLB_SUPPORT #ifdef CONFIG_TRANSPARENT_HUGEPAGE return pmd_mkold(pmd); #else @@ -149,10 +154,8 @@ static pte_t *kvm_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, BUG(); return NULL; } - p4d = p4d_offset(pgd, addr); pud = pud_offset(p4d, addr); - if (pud_none(*pud)) { pmd_t *new_pmd; @@ -163,8 +166,9 @@ static pte_t *kvm_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, pud_populate(NULL, pud, new_pmd); } pmd = pmd_offset(pud, addr); - if (kvm_pmd_huge(*pmd)) + if (kvm_pmd_huge(*pmd)) { return (pte_t *)pmd; + } if (pmd_none(*pmd)) { pte_t *new_pte; @@ -249,7 +253,6 @@ static bool kvm_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa, static bool kvm_flush_gpa_pud(pud_t *pud, unsigned long start_gpa, unsigned long end_gpa, unsigned long *data) { - p4d_t *p4d; pmd_t *pmd; unsigned long end = ~0ul; int 
i_min = pud_index(start_gpa); @@ -261,8 +264,7 @@ static bool kvm_flush_gpa_pud(pud_t *pud, unsigned long start_gpa, if (!pud_present(pud[i])) continue; - p4d = p4d_offset(pgd + i, 0); - pud = pud_offset(p4d, 0); + pmd = pmd_offset(pud + i, 0); if (i == i_max) end = end_gpa; @@ -280,6 +282,7 @@ static bool kvm_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa, unsigned long end_gpa, unsigned long *data) { p4d_t *p4d; + pud_t *pud; unsigned long end = ~0ul; int i_min = pgd_index(start_gpa); int i_max = pgd_index(end_gpa); @@ -386,7 +389,6 @@ static int kvm_mkclean_pmd(pmd_t *pmd, unsigned long start, unsigned long end) static int kvm_mkclean_pud(pud_t *pud, unsigned long start, unsigned long end) { int ret = 0; - p4d_t *p4d; pmd_t *pmd; unsigned long cur_end = ~0ul; int i_min = pud_index(start); @@ -397,8 +399,7 @@ static int kvm_mkclean_pud(pud_t *pud, unsigned long start, unsigned long end) if (!pud_present(pud[i])) continue; - p4d = p4d_offset(pgd + i, 0); - pud = pud_offset(p4d, 0); + pmd = pmd_offset(pud + i, 0); if (i == i_max) cur_end = end; @@ -410,6 +411,7 @@ static int kvm_mkclean_pud(pud_t *pud, unsigned long start, unsigned long end) static int kvm_mkclean_pgd(pgd_t *pgd, unsigned long start, unsigned long end) { int ret = 0; + p4d_t *p4d; pud_t *pud; unsigned long cur_end = ~0ul; int i_min = pgd_index(start); @@ -420,7 +422,8 @@ static int kvm_mkclean_pgd(pgd_t *pgd, unsigned long start, unsigned long end) if (!pgd_present(pgd[i])) continue; - pud = pud_offset(pgd + i, 0); + p4d = p4d_offset(pgd + i, 0); + pud = pud_offset(p4d, 0); if (i == i_max) cur_end = end; @@ -470,6 +473,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, gfn_t end = base_gfn + __fls(mask); kvm_mkclean_gpa_pt(kvm, start, end); + /* * FIXME: disable THP to improve vm migration success ratio, * how to know migration failure to enable THP again @@ -612,7 +616,6 @@ static int kvm_mkold_pmd(pmd_t *pmd, unsigned long start, unsigned long end) static int kvm_mkold_pud(pud_t *pud, unsigned long start, unsigned long end) { int ret = 0; - p4d_t *p4d; pmd_t *pmd; unsigned long cur_end = ~0ul; int i_min = pud_index(start); @@ -623,8 +626,7 @@ static int kvm_mkold_pud(pud_t *pud, unsigned long start, unsigned long end) if (!pud_present(pud[i])) continue; - p4d = p4d_offset(pgd + i, 0); - pud = pud_offset(p4d, 0); + pmd = pmd_offset(pud + i, 0); if (i == i_max) cur_end = end; @@ -638,6 +640,7 @@ static int kvm_mkold_pgd(pgd_t *pgd, unsigned long start, unsigned long end) { int ret = 0; p4d_t *p4d; + pud_t *pud; unsigned long cur_end = ~0ul; int i_min = pgd_index(start); int i_max = pgd_index(end); @@ -791,6 +794,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) static pud_t *kvm_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, phys_addr_t addr) { + pgd_t *pgd; p4d_t *p4d; pgd = kvm->arch.gpa_mm.pgd + pgd_index(addr); @@ -1162,7 +1166,6 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, /* PMD is not folded, adjust gfn to new boundary */ if (vma_pagesize == PMD_SIZE) gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; - mmap_read_unlock(current->mm); /* We need a minimum of cached pages ready for page table creation */ diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 3f381ced0c7bba91f3f3fde69ebcf74b83bcfb7e..1eceb144c97294a4eae6e763eee07be5958cc0ff 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -64,7 +64,7 @@ static void kvm_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now, u64 stable_ti /* Stable 
timer decreased to zero or * initialize to zero, set 4 second timer - */ + */ delta = div_u64(stable_timer * MNSEC_PER_SEC, vcpu->arch.timer_mhz); expire = ktime_add_ns(now, delta); diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index 3a0e3f914391350c9f7845cd7365696c982fe001..6ca81573e294bb6c5d16d55cd78e5d8d4577bf0f 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -82,37 +82,6 @@ DEFINE_EVENT(kvm_transition, kvm_out, #define KVM_TRACE_EXIT_CACHE 65 #define KVM_TRACE_EXIT_SIGNAL 66 -/* Tracepoints for VM exits */ -#define kvm_trace_symbol_exit_types \ - do { \ - { KVM_TRACE_EXIT_INT, "Interrupt" }, \ - { KVM_TRACE_EXIT_TLBLD, "TLB (LD)" }, \ - { KVM_TRACE_EXIT_TLBST, "TLB (ST)" }, \ - { KVM_TRACE_EXIT_TLBI, "TLB Ifetch" }, \ - { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \ - { KVM_TRACE_EXIT_TLBRI, "TLB RI" }, \ - { KVM_TRACE_EXIT_TLBXI, "TLB XI" }, \ - { KVM_TRACE_EXIT_TLBPE, "TLB Previlege Error" },\ - { KVM_TRACE_EXIT_ADDE, "Address Error" }, \ - { KVM_TRACE_EXIT_UNALIGN, "Address unalign" }, \ - { KVM_TRACE_EXIT_ODB, "Out boundary" }, \ - { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \ - { KVM_TRACE_EXIT_BP, "Breakpoint" }, \ - { KVM_TRACE_EXIT_INE, "Reserved Inst" }, \ - { KVM_TRACE_EXIT_IPE, "Inst prev error" }, \ - { KVM_TRACE_EXIT_FPDIS, "FPU disable" }, \ - { KVM_TRACE_EXIT_LSXDIS, "LSX disable" }, \ - { KVM_TRACE_EXIT_LASXDIS, "LASX disable" }, \ - { KVM_TRACE_EXIT_FPE, "FPE" }, \ - { KVM_TRACE_EXIT_WATCH, "DEBUG" }, \ - { KVM_TRACE_EXIT_GSPR, "GSPR" }, \ - { KVM_TRACE_EXIT_HC, "Hypercall" }, \ - { KVM_TRACE_EXIT_GCM, "CSR Mod" }, \ - { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ - { KVM_TRACE_EXIT_CACHE, "CACHE" }, \ - { KVM_TRACE_EXIT_SIGNAL, "Signal" } \ - } while (0) - TRACE_EVENT(kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), TP_ARGS(vcpu, reason), @@ -144,23 +113,19 @@ TRACE_EVENT(kvm_exit, #define KVM_TRACE_AUX_LASX 4 #define KVM_TRACE_AUX_FPU_LSX_LASX 7 -#define kvm_trace_symbol_aux_op \ - do { \ - { KVM_TRACE_AUX_RESTORE, "restore" }, \ +#define kvm_trace_symbol_aux_op \ + ({ KVM_TRACE_AUX_RESTORE, "restore" }, \ { KVM_TRACE_AUX_SAVE, "save" }, \ { KVM_TRACE_AUX_ENABLE, "enable" }, \ { KVM_TRACE_AUX_DISABLE, "disable" }, \ - { KVM_TRACE_AUX_DISCARD, "discard" } \ - } while (0) + { KVM_TRACE_AUX_DISCARD, "discard" }) #define kvm_trace_symbol_aux_state \ - do { \ - { KVM_TRACE_AUX_FPU, "FPU" }, \ + ({ KVM_TRACE_AUX_FPU, "FPU" }, \ { KVM_TRACE_AUX_LSX, "LSX" }, \ { KVM_TRACE_AUX_LASX, "LASX" }, \ { KVM_TRACE_AUX_FPU_LSX, "FPU & LSX" }, \ - { KVM_TRACE_AUX_FPU_LSX_LASX, "FPU & LSX & LASX" } \ - } while (0) + { KVM_TRACE_AUX_FPU_LSX_LASX, "FPU & LSX & LASX" }) TRACE_EVENT(kvm_aux, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index 39743337999e98f5458fe08a42d72e120ffc7d1b..94f6ca6a9df1fd70b509b45a617998dac0a064ce 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -195,7 +195,7 @@ tlb_huge_update_load: csrxchg t1, t0, LOONGARCH_CSR_TLBIDX nopage_tlb_load: - dbar 0 + dbar 0x700 csrrd ra, EXCEPTION_KS2 la.abs t0, tlb_do_page_fault_0 jr t0 @@ -363,7 +363,7 @@ tlb_huge_update_store: csrxchg t1, t0, LOONGARCH_CSR_TLBIDX nopage_tlb_store: - dbar 0 + dbar 0x700 csrrd ra, EXCEPTION_KS2 la.abs t0, tlb_do_page_fault_1 jr t0 @@ -522,7 +522,7 @@ tlb_huge_update_modify: csrxchg t1, t0, LOONGARCH_CSR_TLBIDX nopage_tlb_modify: - dbar 0 + dbar 0x700 csrrd ra, EXCEPTION_KS2 la.abs t0, tlb_do_page_fault_1 jr t0 diff --git a/arch/loongarch/pci/acpi.c 
b/arch/loongarch/pci/acpi.c index c71188ad4fa7fa297af188884401c97fc66c6b51..e1279fd3d15c5ca726ad35b473f447a7da7aa353 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -26,12 +26,14 @@ void pcibios_add_bus(struct pci_bus *bus) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_config_window *cfg = bridge->bus->sysdata; - struct acpi_device *adev = to_acpi_device(cfg->parent); - struct device *bus_dev = &bridge->bus->dev; + if (!acpi_disabled) { + struct pci_config_window *cfg = bridge->bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct device *bus_dev = &bridge->bus->dev; - ACPI_COMPANION_SET(&bridge->dev, adev); - set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + } return 0; } diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h index 8076467eff4b0aa7e1c6c1c35c6d9b802cbaa3a7..956c80874f98b3d6af3f507a45b0f897c66ec31e 100644 --- a/arch/m68k/include/asm/motorola_pgtable.h +++ b/arch/m68k/include/asm/motorola_pgtable.h @@ -129,7 +129,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp) #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK)) #define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK)) -#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK)) +#define pud_pgtable(pud) ((pmd_t *)__va(pud_val(pud) & _TABLE_MASK)) #define pte_none(pte) (!pte_val(pte)) diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 39c4b87ec99e2bf7012bca17a739b61a5c222518..73a7c6151740e9b614fd120d47ca143aab531877 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -314,9 +314,9 @@ static inline void pud_clear(pud_t *pudp) #endif #ifndef __PAGETABLE_PMD_FOLDED -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { - return pud_val(pud); + return (pmd_t *)pud_val(pud); } #define pud_phys(pud) virt_to_phys((void *)pud_val(pud)) #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 75cf84070fc9190343b6379de6a9e72efa16e138..0b6307b12c4fbf19dfaae04c4aef7a06637492c1 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -349,8 +349,8 @@ static inline void pmd_clear(pmd_t *pmd) { #if CONFIG_PGTABLE_LEVELS == 3 -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud))) -#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) +#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud))) +#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) /* For 64 bit we have three level tables */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 71e2c524f1eead2d4c5e5c27a26023812e8b5869..5ebf6450f6dadb7a098f59b3d94c19bf26b5855b 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1030,9 +1030,13 @@ extern struct page *p4d_page(p4d_t p4d); /* Pointers in the page table tree are physical addresses */ #define __pgtable_ptr_val(ptr) __pa(ptr) -#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) #define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)__va(pud_val(pud) & 
~PUD_MASKED_BITS); +} + #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 1eacff0fff02930042793416483dc06da2c37b7c..a4d475c0fc2c023ee810f1cb69f48ff4693b77d0 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -164,7 +164,11 @@ static inline void pud_clear(pud_t *pudp) #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ || (pud_val(pud) & PUD_BAD_BITS)) #define pud_present(pud) (pud_val(pud) != 0) -#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS); +} extern struct page *pud_page(pud_t pud); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index a433d8dd503db79883cb1d4127f625e7847a8db4..ad92f968dc1e829f516174f3e5f0d8f5a482acc8 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -864,7 +864,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, continue; } - pmd_base = (pmd_t *)pud_page_vaddr(*pud); + pmd_base = pud_pgtable(*pud); remove_pmd_table(pmd_base, addr, next); free_pmd_table(pmd_base, pud); } @@ -1149,7 +1149,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) pmd_t *pmd; int i; - pmd = (pmd_t *)pud_page_vaddr(*pud); + pmd = pud_pgtable(*pud); pud_clear(pud); flush_tlb_kernel_range(addr, addr + PUD_SIZE); diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index aefc2bfdf10494b0358ef531c899f51be083132f..bd0d903196d982b8077daea6646b04fed747d3e4 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -117,7 +117,7 @@ struct page *pud_page(pud_t pud) VM_WARN_ON(!pud_huge(pud)); return pte_page(pud_pte(pud)); } - return virt_to_page(pud_page_vaddr(pud)); + return virt_to_page(pud_pgtable(pud)); } /* diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index c4c638409921e0be0d9af4b58bb0f3965e530c21..0e863f3f7187a95cfe28c232b0b6d480a917ad6f 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -60,9 +60,9 @@ static inline void pud_clear(pud_t *pudp) set_pud(pudp, __pud(0)); } -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { - return (unsigned long)pfn_to_virt((pud_val(pud) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT); + return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT); } static inline struct page *pud_page(pud_t pud) diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index 82d74472dfcdab0fc96be442892951ef5220dab4..56bf35c2f29c2b832170d67468c1a23d5f27383e 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h @@ -32,9 +32,9 @@ typedef struct { unsigned long long pmd; } pmd_t; #define pmd_val(x) ((x).pmd) #define __pmd(x) ((pmd_t) { (x) } ) -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { - return pud_val(pud); + return (pmd_t *)pud_val(pud); } /* only used by the stubbed out hugetlb gup code, should never be called */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index a5cf79c149fef574d75612db52d24a3c26b71baf..5a32b262a6b12e0887c33d82ac79559f2e9891d6 100644 --- 
a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -152,13 +152,13 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) return (unsigned long)__nocache_va(v << 4); } -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { if (srmmu_device_memory(pud_val(pud))) { - return ~0; + return (pmd_t *)~0; } else { unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK; - return (unsigned long)__nocache_va(v << 4); + return (pmd_t *)__nocache_va(v << 4); } } diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7ef6affa105e4f8f2e92daffd316e8dd2d659637..cac02ac301f130b8ef5ce4c182968c773100c6cf 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -845,18 +845,18 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) return ((unsigned long) __va(pfn << PAGE_SHIFT)); } -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { pte_t pte = __pte(pud_val(pud)); unsigned long pfn; pfn = pte_pfn(pte); - return ((unsigned long) __va(pfn << PAGE_SHIFT)); + return ((pmd_t *) __va(pfn << PAGE_SHIFT)); } #define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd)) -#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) +#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_present(pud) (pud_val(pud) != 0U) #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index 7e6a4180db9d31c18534b9047a3977228591b70c..091bff319ccdf77e4bda0826f985e93fa95cd05c 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -84,7 +84,7 @@ static inline void pud_clear (pud_t *pud) } #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK) -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) +#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK)) static inline unsigned long pte_pfn(pte_t pte) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5e75341d968351c52c33aeec9d642b57f05221b6..dfe58eccb40d835fc0b3225c1d82ed39ad6bc836 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -842,9 +842,9 @@ static inline int pud_present(pud_t pud) return pud_flags(pud) & _PAGE_PRESENT; } -static inline unsigned long pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { - return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud)); + return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud)); } /* diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 62abc8215c1c2a0b77af8c7d8a06409a9f343ec6..5f4d1b2aa0ae8e63aa30730ea0cb246abe7309e7 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -1127,7 +1127,7 @@ static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, unsigned long start, unsigned long end) { if (unmap_pte_range(pmd, start, end)) - if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + if (try_to_free_pmd_page(pud_pgtable(*pud))) pud_clear(pud); } @@ -1171,7 +1171,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) * Try again to free the PMD page if haven't succeeded above. 
*/ if (!pud_none(*pud)) - if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + if (try_to_free_pmd_page(pud_pgtable(*pud))) pud_clear(pud); } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index d27cf69e811d591d7cfa0314e032f69458e1fd95..3481b35cb4ec7e05ba76848e325d189adda24964 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -797,7 +797,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) pte_t *pte; int i; - pmd = (pmd_t *)pud_page_vaddr(*pud); + pmd = pud_pgtable(*pud); pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); if (!pmd_sv) return 0; diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 2af03bb73380e151ec69d4efd7f1f158f4f06213..113c277923773b13d265d4a18fdb8d0438d9d89e 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -41,6 +41,8 @@ struct mcfg_fixup { static struct mcfg_fixup mcfg_quirks[] = { /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ +#ifdef CONFIG_ARM64 + #define AL_ECAM(table_id, rev, seg, ops) \ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } @@ -162,6 +164,7 @@ static struct mcfg_fixup mcfg_quirks[] = { ALTRA_ECAM_QUIRK(1, 13), ALTRA_ECAM_QUIRK(1, 14), ALTRA_ECAM_QUIRK(1, 15), +#endif #ifdef CONFIG_LOONGARCH #define LOONGSON_ECAM_MCFG(table_id, seg) \ { "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops } diff --git a/drivers/gpu/drm/loongson/Makefile b/drivers/gpu/drm/loongson/Makefile index cf6391b8eb171662babb35e98759d6ecde60e312..73c45144c0c75a865b0c5951ffdd396e04adc644 100644 --- a/drivers/gpu/drm/loongson/Makefile +++ b/drivers/gpu/drm/loongson/Makefile @@ -8,7 +8,6 @@ loongson-y := \ lsdc_pll.o \ lsdc_i2c.o \ lsdc_output.o \ - lsdc_pci_drv.o \ lsdc_debugfs.o \ lsdc-$(CONFIG_DEBUG_FS) += lsdc_debugfs.o diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c index e59b75d790986c01a8f788524009f96c602b1b22..a18dc008159242abf6088f110133e3e55fe16f41 100644 --- a/drivers/gpu/drm/loongson/lsdc_crtc.c +++ b/drivers/gpu/drm/loongson/lsdc_crtc.c @@ -168,7 +168,7 @@ lsdc_crtc_helper_mode_valid(struct drm_crtc *crtc, static int lsdc_pixpll_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct lsdc_display_pipe * const dispipe = drm_crtc_to_dispipe(crtc); + struct lsdc_display_pipe * const dispipe = crtc_to_display_pipe(crtc); struct lsdc_pll * const pixpll = &dispipe->pixpll; const struct lsdc_pixpll_funcs * const pfuncs = pixpll->funcs; struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state); @@ -197,7 +197,7 @@ static int lsdc_crtc_helper_atomic_check(struct drm_crtc *crtc, static void lsdc_update_pixclk(struct drm_crtc *crtc) { - struct lsdc_display_pipe * const dispipe = drm_crtc_to_dispipe(crtc); + struct lsdc_display_pipe * const dispipe = crtc_to_display_pipe(crtc); struct lsdc_pll * const pixpll = &dispipe->pixpll; const struct lsdc_pixpll_funcs * const clkfun = pixpll->funcs; struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/loongson/lsdc_debugfs.c b/drivers/gpu/drm/loongson/lsdc_debugfs.c index 1aad413ee97ca2e1b462ae81246d2ed34f95e363..8ffb181cb0486ff845bccb6bd81f9a468434aa7e 100644 --- a/drivers/gpu/drm/loongson/lsdc_debugfs.c +++ b/drivers/gpu/drm/loongson/lsdc_debugfs.c @@ -82,9 +82,10 @@ static const struct { } lsdc_regs_array[] = { REGDEF(INT), REGDEF(CRTC0_CFG), - REGDEF(CRTC0_FB_ADDR0), - REGDEF(CRTC0_FB_ADDR1), - REGDEF(CRTC0_FB_HI_ADDR), + REGDEF(CRTC0_FB0_LO_ADDR), + REGDEF(CRTC0_FB0_HI_ADDR), + REGDEF(CRTC0_FB1_LO_ADDR), + 
REGDEF(CRTC0_FB1_HI_ADDR), REGDEF(CRTC0_STRIDE), REGDEF(CRTC0_FB_ORIGIN), REGDEF(CRTC0_HDISPLAY), @@ -94,9 +95,10 @@ static const struct { REGDEF(CRTC0_GAMMA_INDEX), REGDEF(CRTC0_GAMMA_DATA), REGDEF(CRTC1_CFG), - REGDEF(CRTC1_FB_ADDR0), - REGDEF(CRTC1_FB_ADDR1), - REGDEF(CRTC1_FB_HI_ADDR), + REGDEF(CRTC1_FB0_LO_ADDR), + REGDEF(CRTC1_FB0_HI_ADDR), + REGDEF(CRTC1_FB1_LO_ADDR), + REGDEF(CRTC1_FB1_HI_ADDR), REGDEF(CRTC1_STRIDE), REGDEF(CRTC1_FB_ORIGIN), REGDEF(CRTC1_HDISPLAY), diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c index 306b0de0d2e80a906855073ff51a9afb6304399b..fbf8001ddc5c31bfd732c0e439eeb88ce0099f2b 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.c +++ b/drivers/gpu/drm/loongson/lsdc_drv.c @@ -11,6 +11,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -20,11 +24,26 @@ #include #include #include +#include #include "lsdc_drv.h" #include "lsdc_irq.h" #include "lsdc_output.h" #include "lsdc_debugfs.h" +#include "lsdc_i2c.h" + +static int lsdc_use_vram_helper = -1; +MODULE_PARM_DESC(use_vram_helper, "Using vram helper based driver(0 = disabled)"); +module_param_named(use_vram_helper, lsdc_use_vram_helper, int, 0644); + +static int lsdc_gamma = -1; +MODULE_PARM_DESC(gamma, "enable gamma (-1 = disabled (default), >0 = enabled)"); +module_param_named(gamma, lsdc_gamma, int, 0644); + +static int lsdc_relax_alignment = -1; +MODULE_PARM_DESC(relax_alignment, + "relax crtc stride alignment (-1 = disabled (default), >0 = enabled)"); +module_param_named(relax_alignment, lsdc_relax_alignment, int, 0644); static const struct lsdc_chip_desc dc_in_ls2k1000 = { .chip = LSDC_CHIP_2K1000, @@ -106,49 +125,9 @@ static const struct drm_mode_config_funcs lsdc_mode_config_funcs = { .mode_valid = lsdc_device_mode_valid, }; -static int lsdc_gem_cma_dumb_create(struct drm_file *file, - struct drm_device *ddev, - struct drm_mode_create_dumb *args) -{ - struct lsdc_device *ldev = to_lsdc(ddev); - const struct lsdc_chip_desc *desc = ldev->desc; - unsigned int bytes_per_pixel = (args->bpp + 7) / 8; - unsigned int pitch = bytes_per_pixel * args->width; - - /* - * The dc in ls7a1000/ls2k1000/ls2k0500 require the stride be a - * multiple of 256 bytes which is for sake of optimize dma data - * transfer. 
- */ - args->pitch = roundup(pitch, desc->stride_alignment); - - return drm_gem_cma_dumb_create_internal(file, ddev, args); -} - -DEFINE_DRM_GEM_CMA_FOPS(lsdc_drv_fops); - -static struct drm_driver lsdc_drm_driver_cma_stub = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, - .fops = &lsdc_drv_fops, - - .name = "lsdc", - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, - - DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(lsdc_gem_cma_dumb_create), - -#ifdef CONFIG_DEBUG_FS - .debugfs_init = lsdc_debugfs_init, -#endif -}; - DEFINE_DRM_GEM_FOPS(lsdc_gem_fops); -static struct drm_driver lsdc_vram_driver_stub = { +static struct drm_driver lsdc_vram_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &lsdc_gem_fops, @@ -168,7 +147,7 @@ static struct drm_driver lsdc_vram_driver_stub = { static int lsdc_modeset_init(struct lsdc_device *ldev, uint32_t num_crtc) { - struct drm_device *ddev = ldev->ddev; + struct drm_device *ddev = &ldev->ddev; unsigned int i; int ret; @@ -226,7 +205,7 @@ static int lsdc_modeset_init(struct lsdc_device *ldev, uint32_t num_crtc) static int lsdc_mode_config_init(struct lsdc_device *ldev) { const struct lsdc_chip_desc * const descp = ldev->desc; - struct drm_device *ddev = ldev->ddev; + struct drm_device *ddev = &ldev->ddev; int ret; ret = drmm_mode_config_init(ddev); @@ -250,13 +229,6 @@ static int lsdc_mode_config_init(struct lsdc_device *ldev) return lsdc_modeset_init(ldev, descp->num_of_crtc); } -static void lsdc_mode_config_fini(struct drm_device *ddev) -{ - drm_atomic_helper_shutdown(ddev); - - drm_mode_config_cleanup(ddev); -} - /* * lsdc_detect_chip - a function to tell different chips apart. */ @@ -308,7 +280,7 @@ lsdc_detect_chip(struct pci_dev *pdev, const struct pci_device_id * const ent) return NULL; } -static int lsdc_remove_conflicting_framebuffers(const struct drm_driver *drv) +static int lsdc_remove_conflicting_framebuffers(void) { struct apertures_struct *ap; @@ -316,39 +288,99 @@ static int lsdc_remove_conflicting_framebuffers(const struct drm_driver *drv) if (!ap) return -ENOMEM; - /* lsdc is a pci device, but it don't have a dedicate vram bar because - * of historic reason. The display controller is ported from Loongson - * 2H series SoC which date back to 2012. - * And simplefb node may have been located anywhere in memory. 
- */ - ap->ranges[0].base = 0; ap->ranges[0].size = ~0; return drm_fb_helper_remove_conflicting_framebuffers(ap, "loongsondrmfb", false); } -static int lsdc_platform_probe(struct platform_device *pdev) +static int lsdc_vram_init(struct lsdc_device *ldev) +{ + const struct lsdc_chip_desc * const descp = ldev->desc; + struct pci_dev *gpu; + resource_size_t base, size; + + if (descp->chip == LSDC_CHIP_7A2000) { + /* BAR 2 of LS7A2000's GPU contains VRAM */ + gpu = pci_get_device(PCI_VENDOR_ID_LOONGSON, 0x7A25, NULL); + } else if (descp->chip == LSDC_CHIP_7A1000) { + /* BAR 2 of LS7A1000's GPU(GC1000) contains VRAM */ + gpu = pci_get_device(PCI_VENDOR_ID_LOONGSON, 0x7A15, NULL); + } else { + drm_err(&ldev->ddev, "Unknown chip, the driver needs an update\n"); + return -ENOENT; + } + + base = pci_resource_start(gpu, 2); + size = pci_resource_len(gpu, 2); + + ldev->vram_base = base; + ldev->vram_size = size; + + drm_info(&ldev->ddev, "vram start: 0x%llx, size: %uMB\n", + (u64)base, (u32)(size >> 20)); + + return 0; +} + +static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct lsdc_device *ldev = dev_get_drvdata(pdev->dev.parent); - struct drm_driver *driver; + struct lsdc_device *ldev; struct drm_device *ddev; + const struct lsdc_chip_desc *descp; int ret; - if (ldev->use_vram_helper) - driver = &lsdc_vram_driver_stub; + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + + descp = lsdc_detect_chip(pdev, ent); + if (!descp) { + pr_info("unknown dc ip core, abort\n"); + return -ENOENT; + } + + lsdc_remove_conflicting_framebuffers(); + + ldev = devm_drm_dev_alloc(&pdev->dev, &lsdc_vram_driver, struct lsdc_device, ddev); + if (IS_ERR(ldev)) + return PTR_ERR(ldev); + + ldev->desc = descp; + ldev->use_vram_helper = lsdc_use_vram_helper && descp->has_vram; + + ddev = &ldev->ddev; + ddev->pdev = pdev; + + pci_set_drvdata(pdev, ddev); + + if (!descp->broken_gamma) + ldev->enable_gamma = true; else - driver = &lsdc_drm_driver_cma_stub; + ldev->enable_gamma = lsdc_gamma > 0 ? true : false; + + ldev->relax_alignment = lsdc_relax_alignment > 0 ?
true : false; - lsdc_remove_conflicting_framebuffers(driver); + /* BAR 0 of the DC device contain registers base address */ + ldev->reg_base = pcim_iomap(pdev, 0, 0); + if (!ldev->reg_base) + return -ENODEV; - ddev = drm_dev_alloc(driver, &pdev->dev); - if (IS_ERR(ddev)) - return PTR_ERR(ddev); + if (ldev->use_vram_helper) { + ret = lsdc_vram_init(ldev); + if (ret) { + drm_err(ddev, "VRAM is unavailable\n"); + ldev->use_vram_helper = false; + } + } - platform_set_drvdata(pdev, ddev); - ldev->ddev = ddev; - ddev->dev_private = ldev; + ldev->irq = pdev->irq; if (ldev->use_vram_helper) { ret = drmm_vram_helper_init(ddev, ldev->vram_base, ldev->vram_size); @@ -358,13 +390,18 @@ static int lsdc_platform_probe(struct platform_device *pdev) } }; + spin_lock_init(&ldev->reglock); + ret = lsdc_mode_config_init(ldev); if (ret) { drm_dbg(ddev, "%s: %d\n", __func__, ret); goto err_kms; } - ret = devm_request_threaded_irq(&pdev->dev, ldev->irq, + drm_mode_config_reset(ddev); + + ret = devm_request_threaded_irq(&pdev->dev, + ldev->irq, lsdc_irq_handler_cb, lsdc_irq_thread_cb, IRQF_ONESHOT, NULL, @@ -378,8 +415,6 @@ static int lsdc_platform_probe(struct platform_device *pdev) if (ret) goto err_kms; - drm_mode_config_reset(ddev); - drm_kms_helper_poll_init(ddev); ret = drm_dev_register(ddev, 0); @@ -396,32 +431,137 @@ static int lsdc_platform_probe(struct platform_device *pdev) drm_dev_put(ddev); return ret; + } -static int lsdc_platform_remove(struct platform_device *pdev) +static void lsdc_pci_remove(struct pci_dev *pdev) { - struct drm_device *ddev = platform_get_drvdata(pdev); - struct lsdc_device *ldev = to_lsdc(ddev); + struct drm_device *ddev = pci_get_drvdata(pdev); drm_dev_unregister(ddev); + drm_atomic_helper_shutdown(ddev); +} - drm_kms_helper_poll_fini(ddev); +static int lsdc_drm_freeze(struct drm_device *ddev) +{ + int error; - devm_free_irq(ddev->dev, ldev->irq, ddev); + error = drm_mode_config_helper_suspend(ddev); + if (error) + return error; - lsdc_mode_config_fini(ddev); + pci_save_state(to_pci_dev(ddev->dev)); - platform_set_drvdata(pdev, NULL); + return 0; +} - drm_dev_put(ddev); +static int lsdc_drm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return drm_mode_config_helper_resume(ddev); +} + +static int lsdc_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return lsdc_drm_freeze(ddev); +} + +static int lsdc_pm_thaw(struct device *dev) +{ + return lsdc_drm_resume(dev); +} + +static int lsdc_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int error; + + error = lsdc_pm_freeze(dev); + if (error) + return error; + + pci_save_state(pdev); + /* Shut down the device */ + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); return 0; } -struct platform_driver lsdc_platform_driver = { - .probe = lsdc_platform_probe, - .remove = lsdc_platform_remove, - .driver = { - .name = "lsdc", - }, +static int lsdc_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + if (pcim_enable_device(pdev)) + return -EIO; + + pci_set_power_state(pdev, PCI_D0); + + pci_restore_state(pdev); + + return lsdc_pm_thaw(dev); +} + +static const struct dev_pm_ops lsdc_pm_ops = { + .suspend = lsdc_pm_suspend, + .resume = lsdc_pm_resume, + .freeze = lsdc_pm_freeze, + .thaw = lsdc_pm_thaw, + .poweroff = lsdc_pm_freeze, + .restore = lsdc_pm_resume, +}; + +static const struct pci_device_id 
lsdc_pciid_list[] = { + {PCI_VENDOR_ID_LOONGSON, 0x7a06, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + (kernel_ulong_t)LSDC_CHIP_7A1000}, + {PCI_VENDOR_ID_LOONGSON, 0x7a36, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + (kernel_ulong_t)LSDC_CHIP_7A2000}, + {0, 0, 0, 0, 0, 0, 0} +}; + +static struct pci_driver lsdc_pci_driver = { + .name = DRIVER_NAME, + .id_table = lsdc_pciid_list, + .probe = lsdc_pci_probe, + .remove = lsdc_pci_remove, + .driver.pm = &lsdc_pm_ops, }; + +static int __init lsdc_drm_init(void) +{ + struct pci_dev *pdev = NULL; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { + /* + * Multiple video card workaround + * + * This integrated video card will always be selected as + * default boot device by vgaarb subsystem. + */ + if (pdev->vendor != PCI_VENDOR_ID_LOONGSON) { + pr_info("Discrete graphic card detected, abort\n"); + return 0; + } + } + + return pci_register_driver(&lsdc_pci_driver); +} +module_init(lsdc_drm_init); + +static void __exit lsdc_drm_exit(void) +{ + pci_unregister_driver(&lsdc_pci_driver); +} +module_exit(lsdc_drm_exit); + +MODULE_DEVICE_TABLE(pci, lsdc_pciid_list); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/loongson/lsdc_drv.h b/drivers/gpu/drm/loongson/lsdc_drv.h index e07b049dcd219546ea4dfd98898137b40bc9b390..f10508173ea0212747608404a33d73a981fff362 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.h +++ b/drivers/gpu/drm/loongson/lsdc_drv.h @@ -21,6 +21,7 @@ #include #include "lsdc_pll.h" +#include "lsdc_regs.h" #define DRIVER_AUTHOR "Sui Jingfeng " #define DRIVER_NAME "lsdc" @@ -57,19 +58,6 @@ struct lsdc_chip_desc { bool broken_gamma; }; -/* There is only a 1:1 mapping of encoders and connectors for lsdc */ -struct lsdc_output { - struct drm_encoder encoder; - struct drm_connector connector; - struct lsdc_i2c *li2c; -}; - -static inline struct lsdc_output * -drm_connector_to_lsdc_output(struct drm_connector *connp) -{ - return container_of(connp, struct lsdc_output, connector); -} - /* * struct lsdc_display_pipe - Abstraction of hardware display pipeline. 
* @crtc: CRTC control structure @@ -89,33 +77,50 @@ struct lsdc_display_pipe { struct drm_plane primary; struct drm_plane cursor; struct lsdc_pll pixpll; - struct lsdc_output *output; + struct drm_encoder encoder; + struct drm_connector connector; + struct lsdc_i2c *li2c; int index; bool available; }; static inline struct lsdc_display_pipe * -drm_crtc_to_dispipe(struct drm_crtc *crtc) +crtc_to_display_pipe(struct drm_crtc *crtc) { return container_of(crtc, struct lsdc_display_pipe, crtc); } static inline struct lsdc_display_pipe * -lsdc_cursor_to_dispipe(struct drm_plane *plane) +primary_to_display_pipe(struct drm_plane *plane) +{ + return container_of(plane, struct lsdc_display_pipe, primary); +} + +static inline struct lsdc_display_pipe * +cursor_to_display_pipe(struct drm_plane *plane) { return container_of(plane, struct lsdc_display_pipe, cursor); } +static inline struct lsdc_display_pipe * +connector_to_display_pipe(struct drm_connector *connector) +{ + return container_of(connector, struct lsdc_display_pipe, connector); +} + +static inline struct lsdc_display_pipe * +encoder_to_display_pipe(struct drm_encoder *encoder) +{ + return container_of(encoder, struct lsdc_display_pipe, encoder); +} + struct lsdc_crtc_state { struct drm_crtc_state base; struct lsdc_pll_core_values pparams; }; struct lsdc_device { - struct device *dev; - struct drm_device *ddev; - /* @dc: pointer to the platform device created at runtime */ - struct platform_device *dc; + struct drm_device ddev; /* @desc: device dependent data and feature descriptions */ const struct lsdc_chip_desc *desc; @@ -127,6 +132,9 @@ struct lsdc_device { struct lsdc_display_pipe dispipe[LSDC_NUM_CRTC]; + /* @reglock: protects concurrent register access */ + spinlock_t reglock; + /* * @num_output: count the number of active display pipe. 
*/ @@ -158,7 +166,7 @@ struct lsdc_device { static inline struct lsdc_device *to_lsdc(struct drm_device *ddev) { - return ddev->dev_private; + return container_of(ddev, struct lsdc_device, ddev); } static inline struct lsdc_crtc_state * @@ -181,4 +189,81 @@ lsdc_detect_chip(struct pci_dev *pdev, const struct pci_device_id * const ent); extern struct platform_driver lsdc_platform_driver; +static inline u32 lsdc_rreg32(struct lsdc_device *ldev, u32 offset) +{ + return readl(ldev->reg_base + offset); +} + +static inline void lsdc_wreg32(struct lsdc_device *ldev, u32 offset, u32 val) +{ + writel(val, ldev->reg_base + offset); +} + +static inline void lsdc_ureg32_set(struct lsdc_device *ldev, + u32 offset, + u32 bit) +{ + void __iomem *addr = ldev->reg_base + offset; + u32 val = readl(addr); + + writel(val | bit, addr); +} + +static inline void lsdc_ureg32_clr(struct lsdc_device *ldev, + u32 offset, + u32 bit) +{ + void __iomem *addr = ldev->reg_base + offset; + u32 val = readl(addr); + + writel(val & ~bit, addr); +} + +static inline u32 lsdc_pipe_rreg32(struct lsdc_device *ldev, + u32 offset, + u32 pipe) +{ + return readl(ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET); +} + +#define lsdc_hdmi_rreg32 lsdc_pipe_rreg32 +#define lsdc_crtc_rreg32 lsdc_pipe_rreg32 + +static inline void lsdc_pipe_wreg32(struct lsdc_device *ldev, + u32 offset, + u32 pipe, + u32 val) +{ + writel(val, ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET); +} + +#define lsdc_hdmi_wreg32 lsdc_pipe_wreg32 +#define lsdc_crtc_wreg32 lsdc_pipe_wreg32 + +static inline void lsdc_crtc_ureg32_set(struct lsdc_device *ldev, + u32 offset, + u32 pipe, + u32 bit) +{ + void __iomem *addr; + u32 val; + + addr = ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET; + val = readl(addr); + writel(val | bit, addr); +} + +static inline void lsdc_crtc_ureg32_clr(struct lsdc_device *ldev, + u32 offset, + u32 pipe, + u32 bit) +{ + void __iomem *addr; + u32 val; + + addr = ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET; + val = readl(addr); + writel(val & ~bit, addr); +} + #endif diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c index ea64bd097a5997bcac1280a4e80d7ba051af4508..a6704054ce62137b9b24ab4026b4cbf72a1b1075 100644 --- a/drivers/gpu/drm/loongson/lsdc_i2c.c +++ b/drivers/gpu/drm/loongson/lsdc_i2c.c @@ -17,6 +17,7 @@ #include "lsdc_regs.h" #include "lsdc_i2c.h" +#include "lsdc_drv.h" /* * ls7a_gpio_i2c_set - set the state of a gpio pin, either high or low. 
@@ -24,10 +25,11 @@ */ static void ls7a_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state) { + struct lsdc_device *ldev = to_lsdc(li2c->ddev); unsigned long flags; u8 val; - spin_lock_irqsave(&li2c->reglock, flags); + spin_lock_irqsave(&ldev->reglock, flags); if (state) { /* @@ -50,7 +52,7 @@ static void ls7a_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state) writeb(val, li2c->dat_reg); } - spin_unlock_irqrestore(&li2c->reglock, flags); + spin_unlock_irqrestore(&ldev->reglock, flags); } /* @@ -59,10 +61,11 @@ static void ls7a_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state) */ static int ls7a_gpio_i2c_get(struct lsdc_i2c * const li2c, int mask) { + struct lsdc_device *ldev = to_lsdc(li2c->ddev); unsigned long flags; u8 val; - spin_lock_irqsave(&li2c->reglock, flags); + spin_lock_irqsave(&ldev->reglock, flags); /* First, set this pin as input */ val = readb(li2c->dir_reg); @@ -72,7 +75,7 @@ static int ls7a_gpio_i2c_get(struct lsdc_i2c * const li2c, int mask) /* Then, get level state from this pin */ val = readb(li2c->dat_reg); - spin_unlock_irqrestore(&li2c->reglock, flags); + spin_unlock_irqrestore(&ldev->reglock, flags); return (val & mask) ? 1 : 0; } @@ -144,8 +147,6 @@ struct lsdc_i2c *lsdc_of_create_i2c_adapter(struct device *parent, if (!li2c) return ERR_PTR(-ENOMEM); - spin_lock_init(&li2c->reglock); - ret = of_property_read_u32(i2c_np, "loongson,sda", &sda); if (ret) { dev_err(parent, "No sda pin number provided\n"); @@ -248,8 +249,7 @@ struct lsdc_i2c *lsdc_create_i2c_chan(struct drm_device *ddev, li2c->scl = 0x08; } - spin_lock_init(&li2c->reglock); - + li2c->ddev = ddev; li2c->dir_reg = reg_base + LS7A_DC_GPIO_DIR_REG; li2c->dat_reg = reg_base + LS7A_DC_GPIO_DAT_REG; diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.h b/drivers/gpu/drm/loongson/lsdc_i2c.h index 62cbf2aaab2e23065f37147384966fbd66c6d64e..8fc1efc433d56bff009459b065d221faa6d51c25 100644 --- a/drivers/gpu/drm/loongson/lsdc_i2c.h +++ b/drivers/gpu/drm/loongson/lsdc_i2c.h @@ -18,8 +18,9 @@ struct lsdc_i2c { struct i2c_adapter adapter; struct i2c_algo_bit_data bit; - /* @reglock: protects concurrent register access */ - spinlock_t reglock; + + struct drm_device *ddev; + void __iomem *dir_reg; void __iomem *dat_reg; /* pin bit mask */ diff --git a/drivers/gpu/drm/loongson/lsdc_output.c b/drivers/gpu/drm/loongson/lsdc_output.c index b84ff25d9d0d94cb2412ea9912076289fee70bf6..4733a014bea8fbc3a64be1089776907579533c42 100644 --- a/drivers/gpu/drm/loongson/lsdc_output.c +++ b/drivers/gpu/drm/loongson/lsdc_output.c @@ -24,9 +24,7 @@ static int lsdc_get_modes(struct drm_connector *connector) { unsigned int num = 0; - struct lsdc_output *lop = drm_connector_to_lsdc_output(connector); - struct lsdc_i2c *li2c = lop->li2c; - struct i2c_adapter *ddc = &li2c->adapter; + struct i2c_adapter *ddc = connector->ddc; if (ddc) { struct edid *edid; @@ -51,26 +49,46 @@ static int lsdc_get_modes(struct drm_connector *connector) } static enum drm_connector_status -lsdc_connector_detect(struct drm_connector *connector, bool force) +ls7a1000_connector_detect(struct drm_connector *connector, bool force) { - struct lsdc_output *lop = drm_connector_to_lsdc_output(connector); - struct lsdc_i2c *li2c = lop->li2c; - struct i2c_adapter *ddc = &li2c->adapter; + struct i2c_adapter *ddc = connector->ddc; - if (ddc && drm_probe_ddc(ddc)) - return connector_status_connected; + if (ddc) { + if (drm_probe_ddc(ddc)) + return connector_status_connected; + else + return connector_status_disconnected; + } - if 
(connector->connector_type == DRM_MODE_CONNECTOR_VIRTUAL) - return connector_status_connected; + return connector_status_unknown; +} - if (connector->connector_type == DRM_MODE_CONNECTOR_DVIA || - connector->connector_type == DRM_MODE_CONNECTOR_DVID || - connector->connector_type == DRM_MODE_CONNECTOR_DVII) - return connector_status_disconnected; +static enum drm_connector_status +ls7a2000_connector_detect(struct drm_connector *connector, bool force) +{ + struct lsdc_display_pipe *dispipe = connector_to_display_pipe(connector); + struct drm_device *ddev = connector->dev; + struct lsdc_device *ldev = to_lsdc(ddev); + u32 val; + + val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG); + + if (dispipe->index == 0) { + if (val & HDMI0_HPD_FLAG) + return connector_status_connected; + + if (connector->ddc) { + if (drm_probe_ddc(connector->ddc)) + return connector_status_connected; + + return connector_status_disconnected; + } + } else if (dispipe->index == 1) { + if (val & HDMI1_HPD_FLAG) + return connector_status_connected; - if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) return connector_status_disconnected; + } return connector_status_unknown; } @@ -84,9 +102,19 @@ static const struct drm_connector_helper_funcs lsdc_connector_helpers = { .get_modes = lsdc_get_modes, }; -static const struct drm_connector_funcs lsdc_connector_funcs = { +static const struct drm_connector_funcs ls7a1000_connector_funcs = { .dpms = drm_helper_connector_dpms, - .detect = lsdc_connector_detect, + .detect = ls7a1000_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = lsdc_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_funcs ls7a2000_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = ls7a2000_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = lsdc_connector_destroy, .reset = drm_atomic_helper_connector_reset, @@ -149,8 +177,8 @@ ls7a2000_hdmi_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *ddev = encoder->dev; struct lsdc_device *ldev = to_lsdc(ddev); int clock = mode->clock; + int counter = 0; u32 val; - int counter; if (index == 0) { writel(0x0, ldev->reg_base + HDMI0_PLL_REG); @@ -217,12 +245,11 @@ static int lsdc_attach_bridges(struct lsdc_device *ldev, unsigned int i) { struct lsdc_display_pipe * const dispipe = &ldev->dispipe[i]; - struct drm_device *ddev = ldev->ddev; + struct drm_device *ddev = &ldev->ddev; struct drm_bridge *bridge; struct drm_panel *panel; struct drm_connector *connector; struct drm_encoder *encoder; - struct lsdc_output *output; int ret; ret = drm_of_find_panel_or_bridge(ports, i, 0, &panel, &bridge); @@ -235,11 +262,7 @@ static int lsdc_attach_bridges(struct lsdc_device *ldev, if (!bridge) return ret; - output = devm_kzalloc(ddev->dev, sizeof(*output), GFP_KERNEL); - if (!output) - return -ENOMEM; - - encoder = &output->encoder; + encoder = &dispipe->encoder; ret = drm_encoder_init(ddev, encoder, &lsdc_encoder_funcs, DRM_MODE_ENCODER_DPI, "encoder-%u", i); @@ -269,14 +292,12 @@ static int lsdc_attach_bridges(struct lsdc_device *ldev, drm_info(ddev, "bridge-%u attached to %s\n", i, encoder->name); - dispipe->output = output; - return 0; } int lsdc_attach_output(struct lsdc_device *ldev, uint32_t num_crtc) { - struct 
drm_device *ddev = ldev->ddev; + struct drm_device *ddev = &ldev->ddev; struct device_node *ports; struct lsdc_display_pipe *disp; unsigned int i; @@ -284,7 +305,7 @@ int lsdc_attach_output(struct lsdc_device *ldev, uint32_t num_crtc) ldev->num_output = 0; - ports = of_get_child_by_name(ldev->dev->of_node, "ports"); + ports = of_get_child_by_name(ddev->dev->of_node, "ports"); for (i = 0; i < num_crtc; i++) { struct drm_bridge *b; @@ -341,19 +362,23 @@ int lsdc_create_output(struct lsdc_device *ldev, { const struct lsdc_chip_desc * const descp = ldev->desc; struct lsdc_display_pipe * const dispipe = &ldev->dispipe[index]; - struct drm_device *ddev = ldev->ddev; + struct drm_encoder *encoder = &dispipe->encoder; + struct drm_connector *connector = &dispipe->connector; + struct drm_device *ddev = &ldev->ddev; int encoder_type = DRM_MODE_ENCODER_DPI; int connector_type = DRM_MODE_CONNECTOR_DPI; - struct lsdc_output *output; - struct drm_encoder *encoder; - struct drm_connector *connector; int ret; - output = devm_kzalloc(ddev->dev, sizeof(*output), GFP_KERNEL); - if (!output) - return -ENOMEM; - - encoder = &output->encoder; + if (descp->has_builtin_i2c) { + dispipe->li2c = lsdc_create_i2c_chan(ddev, ldev->reg_base, index); + if (IS_ERR(dispipe->li2c)) { + drm_err(ddev, "Failed to create i2c adapter\n"); + return PTR_ERR(dispipe->li2c); + } + } else { + drm_warn(ddev, "output-%u has no ddc\n", index); + dispipe->li2c = NULL; + } if (descp->chip == LSDC_CHIP_7A2000) { encoder_type = DRM_MODE_ENCODER_TMDS; @@ -368,32 +393,31 @@ int lsdc_create_output(struct lsdc_device *ldev, return ret; } + encoder->possible_crtcs = BIT(index); + if (descp->chip == LSDC_CHIP_7A2000) drm_encoder_helper_add(encoder, &ls7a2000_hdmi_encoder_helper_funcs); - encoder->possible_crtcs = BIT(index); - - if (descp->has_builtin_i2c) { - output->li2c = lsdc_create_i2c_chan(ddev, ldev->reg_base, index); - if (IS_ERR(output->li2c)) { - drm_err(ddev, "Failed to create i2c adapter\n"); - return PTR_ERR(output->li2c); + if (descp->chip == LSDC_CHIP_7A2000) { + ret = drm_connector_init_with_ddc(ddev, + connector, + &ls7a2000_connector_funcs, + connector_type, + &dispipe->li2c->adapter); + if (ret) { + drm_err(ddev, "Init connector%d failed\n", index); + return ret; } } else { - drm_warn(ddev, "output-%u don't has ddc\n", index); - output->li2c = NULL; - } - - connector = &output->connector; - - ret = drm_connector_init_with_ddc(ddev, - connector, - &lsdc_connector_funcs, - connector_type, - &output->li2c->adapter); - if (ret) { - drm_err(ddev, "Init connector%d failed\n", index); - return ret; + ret = drm_connector_init_with_ddc(ddev, + connector, + &ls7a1000_connector_funcs, + connector_type, + &dispipe->li2c->adapter); + if (ret) { + drm_err(ddev, "Init connector%d failed\n", index); + return ret; + } } drm_connector_helper_add(connector, &lsdc_connector_helpers); @@ -403,7 +427,7 @@ int lsdc_create_output(struct lsdc_device *ldev, drm_connector_attach_encoder(connector, encoder); dispipe->available = true; - dispipe->output = output; + ldev->num_output++; return 0; diff --git a/drivers/gpu/drm/loongson/lsdc_pci_drv.c b/drivers/gpu/drm/loongson/lsdc_pci_drv.c deleted file mode 100644 index 035fb96ed3fe6c2ef2b3f2a6fe12d885c5488483..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/loongson/lsdc_pci_drv.c +++ /dev/null @@ -1,354 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * KMS driver for Loongson display controller - * Copyright (C) 2022 Loongson Corporation - */ - -/* - * Authors: - * Sui Jingfeng - */
- -#include -#include -#include -#include - -#include - -#include "lsdc_drv.h" -#include "lsdc_i2c.h" - -static int lsdc_use_vram_helper = -1; -MODULE_PARM_DESC(use_vram_helper, "Using vram helper based driver(0 = disabled)"); -module_param_named(use_vram_helper, lsdc_use_vram_helper, int, 0644); - -static int lsdc_gamma = -1; -MODULE_PARM_DESC(gamma, "enable gamma (-1 = disabled (default), >0 = enabled)"); -module_param_named(gamma, lsdc_gamma, int, 0644); - -static int lsdc_relax_alignment = -1; -MODULE_PARM_DESC(relax_alignment, - "relax crtc stride alignment (-1 = disabled (default), >0 = enabled)"); -module_param_named(relax_alignment, lsdc_relax_alignment, int, 0644); - - -static struct platform_device * -lsdc_create_platform_device(const char *name, - struct device *parent, - const struct lsdc_chip_desc *descp, - struct resource *res) -{ - struct device *dev; - struct platform_device *pdev; - int ret; - - pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE); - if (!pdev) { - dev_err(parent, "can not create platform device\n"); - return ERR_PTR(-ENOMEM); - } - - dev_info(parent, "platform device %s created\n", name); - - dev = &pdev->dev; - dev->parent = parent; - - if (descp) { - ret = platform_device_add_data(pdev, descp, sizeof(*descp)); - if (ret) { - dev_err(parent, "add platform data failed: %d\n", ret); - goto ERROR_RET; - } - } - - if (res) { - ret = platform_device_add_resources(pdev, res, 1); - if (ret) { - dev_err(parent, "add platform resources failed: %d\n", ret); - goto ERROR_RET; - } - } - - ret = platform_device_add(pdev); - if (ret) { - dev_err(parent, "add platform device failed: %d\n", ret); - goto ERROR_RET; - } - - return pdev; - -ERROR_RET: - platform_device_put(pdev); - return ERR_PTR(ret); -} - -static int lsdc_vram_init(struct lsdc_device *ldev) -{ - const struct lsdc_chip_desc * const descp = ldev->desc; - struct pci_dev *gpu; - resource_size_t base, size; - - if (descp->chip == LSDC_CHIP_7A2000) { - /* BAR 2 of LS7A2000's GPU contain VRAM */ - gpu = pci_get_device(PCI_VENDOR_ID_LOONGSON, 0x7A25, NULL); - } else if (descp->chip == LSDC_CHIP_7A1000) { - /* BAR 2 of LS7A1000's GPU(GC1000) contain VRAM */ - gpu = pci_get_device(PCI_VENDOR_ID_LOONGSON, 0x7A15, NULL); - } else { - dev_err(ldev->dev, "Unknown chip, the driver need update\n"); - return -ENOENT; - } - - if (IS_ERR_OR_NULL(gpu)) { - dev_err(ldev->dev, "Can not get VRAM\n"); - return -ENOENT; - } - - base = pci_resource_start(gpu, 2); - size = pci_resource_len(gpu, 2); - - ldev->vram_base = base; - ldev->vram_size = size; - - dev_info(ldev->dev, "vram start: 0x%llx, size: %uMB\n", - (u64)base, (u32)(size >> 20)); - - return 0; -} - -static void lsdc_of_probe(struct lsdc_device *ldev, struct device_node *np) -{ - struct device_node *ports; - - if (!np) { - ldev->has_dt = false; - ldev->has_ports_node = false; - dev_info(ldev->dev, "don't has DT support\n"); - return; - } - - ports = of_get_child_by_name(np, "ports"); - ldev->has_ports_node = ports ? 
true : false; - of_node_put(ports); -} - -static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct device *dev = &pdev->dev; - const struct lsdc_chip_desc *descp; - struct lsdc_device *ldev; - int ret; - - descp = lsdc_detect_chip(pdev, ent); - if (!descp) { - dev_info(dev, "unknown dc ip core, abort\n"); - return -ENOENT; - } - - ldev = devm_kzalloc(dev, sizeof(*ldev), GFP_KERNEL); - if (IS_ERR(ldev)) - return PTR_ERR(ldev); - - ldev->desc = descp; - ldev->dev = dev; - - if (lsdc_use_vram_helper > 0) - ldev->use_vram_helper = true; - else if ((lsdc_use_vram_helper < 0) && descp->has_vram) - ldev->use_vram_helper = true; - else - ldev->use_vram_helper = false; - - if (!descp->broken_gamma) - ldev->enable_gamma = true; - else - ldev->enable_gamma = lsdc_gamma > 0 ? true : false; - - ldev->relax_alignment = lsdc_relax_alignment > 0 ? true : false; - - lsdc_of_probe(ldev, dev->of_node); - - ret = pcim_enable_device(pdev); - if (ret) - return ret; - - pci_set_master(pdev); - - /* BAR 0 contains registers */ - ldev->reg_base = devm_ioremap_resource(dev, &pdev->resource[0]); - if (IS_ERR(ldev->reg_base)) - return PTR_ERR(ldev->reg_base); - - /* Create GPIO emulated i2c driver as early as possible */ - if (descp->has_builtin_i2c && ldev->has_ports_node) { - struct device_node *i2c_node; - - for_each_compatible_node(i2c_node, NULL, "loongson,gpio-i2c") { - if (!of_device_is_available(i2c_node)) - continue; - - lsdc_of_create_i2c_adapter(dev, ldev->reg_base, i2c_node); - } - } - - if (ldev->has_dt) { - /* Get the optional framebuffer memory resource */ - ret = of_reserved_mem_device_init(dev); - if (ret && (ret != -ENODEV)) - return ret; - } - - if (descp->has_vram && ldev->use_vram_helper) { - ret = lsdc_vram_init(ldev); - if (ret) { - dev_err(dev, "VRAM is unavailable\n"); - ldev->use_vram_helper = false; - } - } - - ldev->irq = pdev->irq; - - dev_set_drvdata(dev, ldev); - - if (descp->has_vram && ldev->use_vram_helper) { - struct resource res; - - memset(&res, 0, sizeof(res)); - res.flags = IORESOURCE_MEM; - res.name = "LS7A_VRAM"; - res.start = ldev->vram_base; - res.end = ldev->vram_size; - } - - ldev->dc = lsdc_create_platform_device("lsdc", dev, descp, NULL); - if (IS_ERR(ldev->dc)) - return PTR_ERR(ldev->dc); - - return platform_driver_register(&lsdc_platform_driver); -} - -static void lsdc_pci_remove(struct pci_dev *pdev) -{ - struct lsdc_device *ldev = pci_get_drvdata(pdev); - - platform_device_unregister(ldev->dc); - - pci_set_drvdata(pdev, NULL); - - pci_clear_master(pdev); - - pci_release_regions(pdev); -} - -static int lsdc_drm_suspend(struct device *dev) -{ - struct lsdc_device *ldev = dev_get_drvdata(dev); - - return drm_mode_config_helper_suspend(ldev->ddev); -} - -static int lsdc_drm_resume(struct device *dev) -{ - struct lsdc_device *ldev = dev_get_drvdata(dev); - - return drm_mode_config_helper_resume(ldev->ddev); -} - -static int lsdc_pm_freeze(struct device *dev) -{ - return lsdc_drm_suspend(dev); -} - -static int lsdc_pm_thaw(struct device *dev) -{ - return lsdc_drm_resume(dev); -} - -static int lsdc_pm_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int error; - - error = lsdc_pm_freeze(dev); - if (error) - return error; - - pci_save_state(pdev); - /* Shut down the device */ - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -static int lsdc_pm_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - if (pcim_enable_device(pdev)) - return -EIO; - - 
pci_set_power_state(pdev, PCI_D0); - - pci_restore_state(pdev); - - return lsdc_pm_thaw(dev); -} - -static const struct dev_pm_ops lsdc_pm_ops = { - .suspend = lsdc_pm_suspend, - .resume = lsdc_pm_resume, - .freeze = lsdc_pm_freeze, - .thaw = lsdc_pm_thaw, - .poweroff = lsdc_pm_freeze, - .restore = lsdc_pm_resume, -}; - -static const struct pci_device_id lsdc_pciid_list[] = { - {PCI_VENDOR_ID_LOONGSON, 0x7a06, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, (kernel_ulong_t)LSDC_CHIP_7A1000}, - {PCI_VENDOR_ID_LOONGSON, 0x7a36, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, (kernel_ulong_t)LSDC_CHIP_7A2000}, - {0, 0, 0, 0, 0, 0, 0} -}; - -static struct pci_driver lsdc_pci_driver = { - .name = DRIVER_NAME, - .id_table = lsdc_pciid_list, - .probe = lsdc_pci_probe, - .remove = lsdc_pci_remove, - .driver.pm = &lsdc_pm_ops, -}; - -static int __init lsdc_drm_init(void) -{ - struct pci_dev *pdev = NULL; - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { - /* - * Multiple video card workaround - * - * This integrated video card will always be selected as - * default boot device by vgaarb subsystem. - */ - if (pdev->vendor != PCI_VENDOR_ID_LOONGSON) { - pr_info("Discrete graphic card detected, abort\n"); - return 0; - } - } - - return pci_register_driver(&lsdc_pci_driver); -} -module_init(lsdc_drm_init); - -static void __exit lsdc_drm_exit(void) -{ - pci_unregister_driver(&lsdc_pci_driver); -} -module_exit(lsdc_drm_exit); - -MODULE_DEVICE_TABLE(pci, lsdc_pciid_list); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index 6f65c9fd687ea7c9d3dd5dd61a1f1fc4c140de0b..085347ec804ec47c1b4839c14e3d2144aef15ba6 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -70,54 +70,32 @@ static void lsdc_update_fb_format(struct lsdc_device *ldev, } static void lsdc_update_fb_start_addr(struct lsdc_device *ldev, - struct drm_crtc *crtc, - u64 paddr) + unsigned int index, + u64 fb_addr) { - unsigned int index = drm_crtc_index(crtc); - u32 lo32_addr_reg; - u32 hi32_addr_reg; - u32 cfg_reg; - u32 val; + u32 lo = fb_addr & 0xFFFFFFFF; + u32 hi = (fb_addr >> 32) & 0xFF; + u32 cfg; - /* - * Find which framebuffer address register should update. 
- * if FB_ADDR0_REG is in using, we write the addr to FB_ADDR1_REG, - * if FB_ADDR1_REG is in using, we write the addr to FB_ADDR0_REG - */ if (index == 0) { - /* CRTC0 */ - val = readl(ldev->reg_base + LSDC_CRTC0_CFG_REG); - - cfg_reg = LSDC_CRTC0_CFG_REG; - hi32_addr_reg = LSDC_CRTC0_FB_HI_ADDR_REG; - - if (val & CFG_FB_IDX_BIT) - lo32_addr_reg = LSDC_CRTC0_FB_ADDR0_REG; - else - lo32_addr_reg = LSDC_CRTC0_FB_ADDR1_REG; + cfg = lsdc_crtc_rreg32(ldev, LSDC_CRTC0_CFG_REG, index); + if (cfg & BIT(9)) { + lsdc_wreg32(ldev, LSDC_CRTC0_FB1_LO_ADDR_REG, lo); + lsdc_wreg32(ldev, LSDC_CRTC0_FB1_HI_ADDR_REG, hi); + } else { + lsdc_wreg32(ldev, LSDC_CRTC0_FB0_LO_ADDR_REG, lo); + lsdc_wreg32(ldev, LSDC_CRTC0_FB0_HI_ADDR_REG, hi); + } } else if (index == 1) { - /* CRTC1 */ - val = readl(ldev->reg_base + LSDC_CRTC1_CFG_REG); - - cfg_reg = LSDC_CRTC1_CFG_REG; - hi32_addr_reg = LSDC_CRTC1_FB_HI_ADDR_REG; - - if (val & CFG_FB_IDX_BIT) - lo32_addr_reg = LSDC_CRTC1_FB_ADDR0_REG; - else - lo32_addr_reg = LSDC_CRTC1_FB_ADDR1_REG; + cfg = lsdc_crtc_rreg32(ldev, LSDC_CRTC1_CFG_REG, index); + if (cfg & BIT(9)) { + lsdc_wreg32(ldev, LSDC_CRTC1_FB1_LO_ADDR_REG, lo); + lsdc_wreg32(ldev, LSDC_CRTC1_FB1_HI_ADDR_REG, hi); + } else { + lsdc_wreg32(ldev, LSDC_CRTC1_FB0_LO_ADDR_REG, lo); + lsdc_wreg32(ldev, LSDC_CRTC1_FB0_HI_ADDR_REG, hi); + } } - - drm_dbg(ldev->ddev, "crtc%u scantout from 0x%llx\n", index, paddr); - - /* The bridge's bus width is 40 */ - writel(paddr, ldev->reg_base + lo32_addr_reg); - writel((paddr >> 32) & 0xFF, ldev->reg_base + hi32_addr_reg); - /* - * Then, we triger the fb switch, the switch of the framebuffer - * to be scanout will complete at the next vblank. - */ - writel(val | CFG_PAGE_FLIP_BIT, ldev->reg_base + cfg_reg); } static unsigned int lsdc_get_fb_offset(struct drm_framebuffer *fb, @@ -176,7 +154,7 @@ static void lsdc_update_stride(struct lsdc_device *ldev, else if (index == 1) writel(stride, ldev->reg_base + LSDC_CRTC1_STRIDE_REG); - drm_dbg(ldev->ddev, "update stride to %u\n", stride); + drm_dbg(&ldev->ddev, "update stride to %u\n", stride); } static void lsdc_primary_plane_atomic_update(struct drm_plane *plane, @@ -204,7 +182,7 @@ static void lsdc_primary_plane_atomic_update(struct drm_plane *plane, fb_addr = obj->paddr + fb_offset; } - lsdc_update_fb_start_addr(ldev, crtc, fb_addr); + lsdc_update_fb_start_addr(ldev, drm_crtc_index(crtc), fb_addr); lsdc_update_stride(ldev, crtc, fb->pitches[0]); @@ -276,7 +254,7 @@ static int lsdc_cursor_atomic_check(struct drm_plane *plane, static void lsdc_cursor_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state) { - struct lsdc_display_pipe * const dispipe = lsdc_cursor_to_dispipe(plane); + struct lsdc_display_pipe * const dispipe = cursor_to_display_pipe(plane); struct drm_device *ddev = plane->dev; struct lsdc_device *ldev = to_lsdc(ddev); const struct lsdc_chip_desc * const descp = ldev->desc; @@ -357,7 +335,7 @@ static void lsdc_cursor_atomic_update(struct drm_plane *plane, static void lsdc_cursor_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { - const struct lsdc_display_pipe * const dispipe = lsdc_cursor_to_dispipe(plane); + const struct lsdc_display_pipe * const dispipe = cursor_to_display_pipe(plane); struct drm_device *ddev = plane->dev; struct lsdc_device *ldev = to_lsdc(ddev); const struct lsdc_chip_desc * const descp = ldev->desc; @@ -423,7 +401,7 @@ int lsdc_plane_init(struct lsdc_device *ldev, enum drm_plane_type type, unsigned int index) { - struct drm_device *ddev = 
ldev->ddev; + struct drm_device *ddev = &ldev->ddev; int zpos = lsdc_plane_get_default_zpos(type); unsigned int format_count; const u32 *formats; diff --git a/drivers/gpu/drm/loongson/lsdc_regs.h b/drivers/gpu/drm/loongson/lsdc_regs.h index ffa6285530d7788aa66cceef272d765652371295..00ccac2601cd71d747d5000c8692ee9bbccbe755 100644 --- a/drivers/gpu/drm/loongson/lsdc_regs.h +++ b/drivers/gpu/drm/loongson/lsdc_regs.h @@ -77,9 +77,8 @@ enum lsdc_pixel_format { /******** CRTC0 & DVO0 ********/ #define LSDC_CRTC0_CFG_REG 0x1240 -#define LSDC_CRTC0_FB_ADDR0_REG 0x1260 -#define LSDC_CRTC0_FB_ADDR1_REG 0x1580 -#define LSDC_CRTC0_FB_HI_ADDR_REG 0x15A0 +#define LSDC_CRTC0_FB0_LO_ADDR_REG 0x1260 +#define LSDC_CRTC0_FB0_HI_ADDR_REG 0x15A0 #define LSDC_CRTC0_STRIDE_REG 0x1280 #define LSDC_CRTC0_FB_ORIGIN_REG 0x1300 #define LSDC_CRTC0_HDISPLAY_REG 0x1400 @@ -88,12 +87,13 @@ enum lsdc_pixel_format { #define LSDC_CRTC0_VSYNC_REG 0x14A0 #define LSDC_CRTC0_GAMMA_INDEX_REG 0x14E0 #define LSDC_CRTC0_GAMMA_DATA_REG 0x1500 +#define LSDC_CRTC0_FB1_LO_ADDR_REG 0x1580 +#define LSDC_CRTC0_FB1_HI_ADDR_REG 0x15C0 /******** CTRC1 & DVO1 ********/ #define LSDC_CRTC1_CFG_REG 0x1250 -#define LSDC_CRTC1_FB_ADDR0_REG 0x1270 -#define LSDC_CRTC1_FB_ADDR1_REG 0x1590 -#define LSDC_CRTC1_FB_HI_ADDR_REG 0x15C0 +#define LSDC_CRTC1_FB0_LO_ADDR_REG 0x1270 +#define LSDC_CRTC1_FB0_HI_ADDR_REG 0x15B0 #define LSDC_CRTC1_STRIDE_REG 0x1290 #define LSDC_CRTC1_FB_ORIGIN_REG 0x1310 #define LSDC_CRTC1_HDISPLAY_REG 0x1410 @@ -102,8 +102,17 @@ enum lsdc_pixel_format { #define LSDC_CRTC1_VSYNC_REG 0x14B0 #define LSDC_CRTC1_GAMMA_INDEX_REG 0x14F0 #define LSDC_CRTC1_GAMMA_DATA_REG 0x1510 +#define LSDC_CRTC1_FB1_LO_ADDR_REG 0x1590 +#define LSDC_CRTC1_FB1_HI_ADDR_REG 0x15D0 -#define LSDC_REGS_OFFSET 0x0010 +/* + * Roughly, LSDC_CRTC1_XXX_REG - LSDC_CRTC0_XXX_REG = 0x10, but not all of + * the registers obey this rule; LSDC_CURSORx_XXX_REG just doesn't honor it. + * This is the root cause why we can't untangle the code simply by + * manipulating the offset of the register access: the hardware designers + * lacked experience when they designed this... + */ +#define CRTC_PIPE_OFFSET 0x10 /* * Hardware cursor @@ -249,4 +258,11 @@ enum lsdc_pixel_format { #define DMA_STEP_64_BYTE (2 << 16) #define DMA_STEP_32_BYTE (3 << 16) +/* LS7A2000/LS2K2000 have an HPD status register; the status bits of + * both HDMI ports are located in this single register. + */ +#define LSDC_HDMI_HPD_STATUS_REG 0x1BA0 +#define HDMI0_HPD_FLAG BIT(0) +#define HDMI1_HPD_FLAG BIT(1) + #endif diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index ca32e73bb92414916a7f064d5ad99674740bf8c4..e99525bf09b31f55463177669b136d45a73767c3 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -58,7 +58,9 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + int cores = (cpu_has_hypervisor ?
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + return cpu_logical_map(cpu) / cores; } static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) @@ -89,6 +91,11 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, static DEFINE_RAW_SPINLOCK(affinity_lock); +static void virt_extioi_set_irq_route(int irq, unsigned int cpu) +{ + iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + irq); +} + static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int cpu; @@ -111,16 +118,22 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + if (cpu_has_hypervisor) { + iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); + virt_extioi_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -146,14 +159,15 @@ static int eiointc_router_init(unsigned int cpu) int i, bit; uint32_t data; uint32_t node = cpu_to_eio_node(cpu); - uint32_t index = eiointc_index(node); + int index = eiointc_index(node); + int cores = (cpu_has_hypervisor ? MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); if (index < 0) { pr_err("Error: invalid nodemap!\n"); return -1; } - if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + if ((cpu_logical_map(cpu) % cores) == 0) { eiointc_enable(); for (i = 0; i < VEC_COUNT / 32; i++) { @@ -170,7 +184,8 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < VEC_COUNT / 4; i++) { /* Route to Node-0 Core-0 */ if (index == 0) - bit = BIT(cpu_logical_map(0)); + bit = (cpu_has_hypervisor ? 
cpu_logical_map(0) + : BIT(cpu_logical_map(0))); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -280,9 +295,6 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi { int i; - if (cpu_has_flatmode) - node = cpu_to_node(node * CORES_PER_EIO_NODE); - for (i = 0; i < MAX_IO_PICS; i++) { if (node == vec_group[i].node) { vec_group[i].parent = parent; @@ -351,8 +363,13 @@ int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { + struct irq_domain *parent; struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; - struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group); + int node = eiointc_priv[nr_pics - 1]->node; + + if (cpu_has_flatmode) + node = cpu_to_node(node * CORES_PER_EIO_NODE); + parent = acpi_get_vec_parent(node, msi_group); if (parent) return pch_msi_acpi_init(parent, pchmsi_entry); @@ -375,6 +392,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent, int i, parent_irq; unsigned long node_map; struct eiointc_priv *priv; + int node = acpi_eiointc->node; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -412,13 +430,17 @@ int __init eiointc_acpi_init(struct irq_domain *parent, parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); - register_syscore_ops(&eiointc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, + if (nr_pics == 1) { + register_syscore_ops(&eiointc_syscore_ops); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, "irqchip/loongarch/intc:starting", eiointc_router_init, NULL); + } - acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group); - acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group); + if (cpu_has_flatmode) + node = cpu_to_node(node * CORES_PER_EIO_NODE); + acpi_set_vec_parent(node, priv->eiointc_domain, pch_group); + acpi_set_vec_parent(node, priv->eiointc_domain, msi_group); acpi_cascade_irqdomain_init(); return 0; diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index e09078e1ac0161dce262983fde4136fa2c1b057e..1d9fa3398ceaf11fe98403e7c10649475a0207a9 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -305,7 +305,8 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, pch_pic_handle[nr_pics] = domain_handle; pch_pic_priv[nr_pics++] = priv; - register_syscore_ops(&pch_pic_syscore_ops); + if (nr_pics == 1) + register_syscore_ops(&pch_pic_syscore_ops); return 0; @@ -394,6 +395,9 @@ int __init pch_pic_acpi_init(struct irq_domain *parent, int ret, vec_base; struct fwnode_handle *domain_handle; + if (find_pch_pic(acpi_pchpic->gsi_base) >= 0) + return 0; + vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ; domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address); diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 24441c1cf02c36fdfe35a83b97e93072483bc446..6288ff053de77237fbc4d0aec75743d1222cebeb 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "../pci.h" @@ -157,6 +158,91 @@ static void loongson_ohci_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); +static void loongson_display_quirk(struct pci_dev *dev) +{ + u32 val; + u64 mask, size; + 
u64 max_size = 0; + int i, num; + struct pci_bus *bus = dev->bus; + + if (!dev->bus->number) { + if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25)) + return; + } else { + while (!pci_is_root_bus(bus->parent)) + bus = bus->parent; + + /* ensure slot is 7a2000 */ + if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39) + return; + } + max_size = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (dev->resource[i].flags & IORESOURCE_MEM) { + size = dev->resource[i].end - dev->resource[i].start; + if (size > max_size) { + max_size = size; + num = i; + } + } + } + mask = ~(dev->resource[num].end - dev->resource[num].start); + val = (dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff); + writel(val, (void *)0x80000efdfb000174UL); + writel(0x80000000, (void *)0x80000efdfb000170UL); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); + +static void pci_fixup_aspeed(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + struct pci_bus *bus; + struct pci_dev *vdevp = NULL; + u16 config; + + bus = pdev->bus; + bridge = bus->self; + + /* Is VGA routed to us? */ + if (bridge && (pci_is_bridge(bridge))) { + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); + + /* Yes, this bridge is PCI bridge-to-bridge spec compliant, + * just return! + */ + if (config & PCI_BRIDGE_CTL_VGA) + return; + + dev_warn(&pdev->dev, "VGA bridge control is not enabled\n"); + } + + /* Just return if the system already has a default device */ + if (vga_default_device()) + return; + + /* No default vga device */ + while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) { + if (vdevp->vendor != 0x1a03) { + /* There is another vga device in the system, do nothing */ + dev_info(&pdev->dev, + "Another boot vga device: 0x%x:0x%x\n", + vdevp->vendor, vdevp->device); + return; + } + } + + vga_set_default_device(pdev); + + dev_info(&pdev->dev, + "Boot vga device set as 0x%x:0x%x\n", + pdev->vendor, pdev->device); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000, + PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig index 8925fe9b24d40b3ab7b47df40e0c12ff77d7471b..3f3b694afa37a775af231c28e32e72a9e7661a50 100644 --- a/drivers/platform/loongarch/Kconfig +++ b/drivers/platform/loongarch/Kconfig @@ -21,7 +21,6 @@ config LOONGSON_GENERIC_LAPTOP depends on BACKLIGHT_CLASS_DEVICE depends on INPUT depends on MACH_LOONGSON64 - select ACPI_VIDEO select INPUT_SPARSEKMAP default y help diff --git a/drivers/platform/loongarch/loongson_generic_laptop.c b/drivers/platform/loongarch/loongson_generic_laptop.c index dbeda9b7ae193c6c6016f8ed0e8737f0fbffde50..f180f22e27f1dd6696cf267b7cfce2fb1290eab0 100644 --- a/drivers/platform/loongarch/loongson_generic_laptop.c +++ b/drivers/platform/loongarch/loongson_generic_laptop.c @@ -235,7 +235,7 @@ static int hkey_map(void) status = acpi_evaluate_object_typed(hkey_handle, METHOD_NAME__KMAP, NULL, &buf, ACPI_TYPE_PACKAGE); if (status != AE_OK) { - dev_err(": ACPI exception: %s\n", + pr_err(": ACPI exception: %s\n", acpi_format_exception(status)); return -1; } @@ -266,13 +266,13 @@ static int event_init(struct generic_sub_driver *sub_driver) ret = hkey_map(); if (ret) { - dev_err("Fail to parse keymap from DSDT.\n"); +
pr_err("Fail to parse keymap from DSDT.\n"); return ret; } ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL); if (ret) { - dev_err("Fail to setup input device keymap\n"); + pr_err("Fail to setup input device keymap\n"); input_free_device(generic_inputdev); return ret; diff --git a/drivers/spi/spi-loongson.c b/drivers/spi/spi-loongson.c index 1d951d5f9159d81ad922246b0c0dfe16c7a588d5..c99e7acb0c4394b77f28365b29d15e077f8c97d8 100644 --- a/drivers/spi/spi-loongson.c +++ b/drivers/spi/spi-loongson.c @@ -456,15 +456,15 @@ static int loongson_spi_pci_register(struct pci_dev *pdev, /* Enable device in PCI config */ ret = pci_enable_device(pdev); if (ret < 0) { - dev_err("loongson-pci (%s): Cannot enable PCI device\n", - ci_name(pdev)); + dev_err(&pdev->dev, "loongson-pci (%s): Cannot enable PCI device\n", + pci_name(pdev)); goto err_out; } /* request the mem regions */ ret = pci_request_region(pdev, 0, "loongson-spi io"); if (ret < 0) { - dev_err("loongson-spi (%s): cannot request region 0.\n", + dev_err(&pdev->dev, "loongson-spi (%s): cannot request region 0.\n", pci_name(pdev)); goto err_out; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 180e93d9b2cc350ab1049128295bac27ba3f7247..0a3f26b66b2333c290f613e597b18a900357b987 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -35,6 +35,7 @@ #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_DEVICE_ID_EJ188 0x7052 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -278,6 +279,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 3e13acd019aefc2fdbc9f2aede12a31e52c83b77..10789cf51d1603337c270c204d7074e71a0442f6 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -51,7 +51,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) #define __pmd(x) ((pmd_t) { __pud(x) } ) #define pud_page(pud) (pmd_page((pmd_t){ pud })) -#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) +#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud }))) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index a9d751fbda9e87f2ccb66638438ef868ec5b6855..7cbd15f70bf55d6477967fdbf375c9abbc33c3b7 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) #define __pud(x) ((pud_t) { __p4d(x) }) #define p4d_page(p4d) (pud_page((pud_t){ p4d })) -#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) +#define p4d_page_vaddr(p4d) (pud_pgtable((pud_t){ p4d })) /* * allocating and freeing a pud is trivial: the 1-entry pud is diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 741cd12b2c00a2a4c1932bb6aa034d7aa1f8c7ad..91103fb70f693d246efff46f55e70a149a27f081 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -89,7 +89,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, 
unsigned long address) #ifndef pmd_offset static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { - return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); + return pud_pgtable(*pud) + pmd_index(address); } #define pmd_offset pmd_offset #endif diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index eece881b9e02f78d381c1eae73b7d6aa43b2755b..01ebf529b79dadda23ebf9190541350a76b024a7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/scripts/Makefile b/scripts/Makefile index 9fa609edc38d9d896fb11cffabe5a2220b6ebeb4..399ebfb22e4317e239020c8715014d0cc8b3e57d 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -25,19 +25,18 @@ HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS) ifdef CONFIG_UNWINDER_ORC -# Additional ARCH settings for x86 +ifneq ($(filter loongarch loongarch64, $(ARCH)),) +HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/loongarch/include +HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED +HOSTLDLIBS_sorttable = -lpthread +else ifeq ($(ARCH),x86_64) ARCH := x86 endif - -# Additional ARCH settings for loongarch -ifeq ($(ARCH),loongarch64) -ARCH := loongarch -endif - -HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/$(ARCH)/include +HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED endif +endif ifdef CONFIG_BUILDTIME_MCOUNT_SORT HOSTCFLAGS_sorttable.o += -DMCOUNT_SORT_ENABLED
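
The arch-specific hunks in this series all replace pud_page_vaddr(), which returned an unsigned long, with a typed pud_pgtable() helper, so generic code such as pmd_offset() can do pointer arithmetic on a pmd_t * without a cast. Below is a minimal, self-contained sketch of that accessor pattern; it is not taken from the patch, and every demo_* name, DEMO_PAGE_OFFSET, and demo___va() are hypothetical stand-ins for the real kernel symbols.

#include <stdio.h>

typedef struct { unsigned long val; } demo_pud_t;
typedef struct { unsigned long val; } demo_pmd_t;

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_MASK    (~((1UL << DEMO_PAGE_SHIFT) - 1))
#define DEMO_PAGE_OFFSET  0x40000000UL	/* pretend direct-map base */

/* Hypothetical phys-to-virt conversion for the direct map. */
static inline void *demo___va(unsigned long phys)
{
	return (void *)(phys + DEMO_PAGE_OFFSET);
}

/* Old style: callers receive an unsigned long and must cast it. */
static inline unsigned long demo_pud_page_vaddr(demo_pud_t pud)
{
	return (unsigned long)demo___va(pud.val & DEMO_PAGE_MASK);
}

/* New style: the PMD table type is part of the signature, so no cast. */
static inline demo_pmd_t *demo_pud_pgtable(demo_pud_t pud)
{
	return (demo_pmd_t *)demo___va(pud.val & DEMO_PAGE_MASK);
}

int main(void)
{
	demo_pud_t pud = { .val = 0x2000 };	/* PMD table at physical 0x2000 */
	unsigned long idx = 3;			/* stands in for pmd_index(address) */

	demo_pmd_t *old_way = (demo_pmd_t *)demo_pud_page_vaddr(pud) + idx;
	demo_pmd_t *new_way = demo_pud_pgtable(pud) + idx;

	printf("old=%p new=%p same=%d\n",
	       (void *)old_way, (void *)new_way, old_way == new_way);
	return 0;
}

Both helpers compute the same virtual address; the only difference is that the typed variant lets the compiler check the pointer arithmetic, which is exactly what the pmd_offset() hunks in include/linux/pgtable.h and the per-arch headers rely on.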